Compare commits


1 Commit

484 changed files with 8648 additions and 23114 deletions


@@ -76,6 +76,9 @@ jobs:
latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.gpu-driver }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
@@ -100,7 +103,7 @@ jobs:
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
# cache-from: |
# type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
# type=gha,scope=main-${{ matrix.gpu-driver }}
# cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
type=gha,scope=main-${{ matrix.gpu-driver }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}


@@ -13,67 +13,52 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
git
# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/
ENV VIRTUAL_ENV=/opt/venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV INVOKEAI_SRC=/opt/invokeai
ENV PYTHON_VERSION=3.11
ENV UV_PYTHON=3.11
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
ENV UV_INDEX="https://download.pytorch.org/whl/cu124"
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM
# Switch to the `ubuntu` user to work around dependency issues with uv-installed python
RUN mkdir -p ${VIRTUAL_ENV} && \
mkdir -p ${INVOKEAI_SRC} && \
chmod -R a+w /opt && \
mkdir ~ubuntu/.cache && chown ubuntu: ~ubuntu/.cache
chmod -R a+w /opt
USER ubuntu
# Install python
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
uv python install ${PYTHON_VERSION}
# Install python and create the venv
RUN uv python install ${PYTHON_VERSION} && \
uv venv --relocatable --prompt "invoke" --python ${PYTHON_VERSION} ${VIRTUAL_ENV}
WORKDIR ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./
# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=invokeai/version,target=invokeai/version \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
UV_INDEX="https://download.pytorch.org/whl/cpu"; \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \
UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
fi && \
uv sync --no-install-project
# Now that the bulk of the dependencies have been installed, copy in the project files that change more frequently.
COPY invokeai invokeai
COPY pyproject.toml .
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
UV_INDEX="https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \
UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
fi && \
uv sync
uv pip install --python ${PYTHON_VERSION} $extra_index_url_arg -e "."
#### Build the Web UI ------------------------------------
FROM docker.io/node:22-slim AS web-builder
FROM node:20-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
@@ -113,7 +98,6 @@ RUN apt update && apt install -y --no-install-recommends \
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv
ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
ENV PYTHON_VERSION=3.11
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0
@@ -125,7 +109,7 @@ ENV CONTAINER_GID=${CONTAINER_GID:-1000}
# Install `uv` for package management
# and install python for the ubuntu user (expected to exist on ubuntu >=24.x)
# this is too tiny to optimize with multi-stage builds, but maybe we'll come back to it
COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/
USER ubuntu
RUN uv python install ${PYTHON_VERSION}
USER root


@@ -1,50 +1,41 @@
# Release Process
The Invoke application is published as a python package on [PyPI]. This includes both a source distribution and built distribution (a wheel).
The app is published twice, in different build formats.
Most users install it with the [Launcher](https://github.com/invoke-ai/launcher/), others with `pip`.
The launcher uses GitHub as the source of truth for available releases.
## Broad Strokes
- Merge all changes and bump the version in the codebase.
- Tag the release commit.
- Wait for the release workflow to complete.
- Approve the PyPI publish jobs.
- Write GH release notes.
- A [PyPI] distribution. This includes both a source distribution and built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.
## General Prep
Make a developer call-out for PRs to merge. Merge and test things out. Bump the version by editing `invokeai/version/invokeai_version.py`.
Make a developer call-out for PRs to merge. Merge and test things out.
While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.
## Release Workflow
The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.
It is triggered on **tag push**, when the tag matches `v*`.
It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.
> Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch. So long as the tag exists, that commit will exist.
### Triggering the Workflow
Ensure all commits that should be in the release are merged, and you have pulled them locally.
Run `make tag-release` to tag the current commit and kick off the workflow.
Double-check that you have checked out the commit that will represent the release (typically the latest commit on `main`).
Run `make tag-release` to tag the current commit and kick off the workflow. You will be prompted to provide a message - use the version specifier.
If this version's tag already exists for some reason (maybe you had to make a last-minute change), the script will overwrite it.
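Under the hood, the tag step boils down to forcing an annotated tag and pushing it. A rough sketch of the equivalent git commands, with `v5.8.0` standing in for the real version:
```sh
# Make sure you're on the release commit
git checkout main && git pull
# -f overwrites the tag if it already exists
git tag -fa v5.8.0 -m "v5.8.0"
# Pushing the tag kicks off the release workflow
git push --force origin v5.8.0
```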
> In case you cannot use the Make target, the release may also be dispatched [manually] via GH.
The release may also be dispatched [manually].
### Workflow Jobs and Process
The workflow consists of a number of concurrently-run checks and tests, then two final publish jobs.
The workflow consists of a number of concurrently-run jobs, and two final publish jobs.
The publish jobs require manual approval and are only run if the other jobs succeed.
#### `check-version` Job
This job ensures that the `invokeai` python package version specifier matches the tag for the release. The version specifier is pulled from the `__version__` variable in `invokeai/version/invokeai_version.py`.
This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`.
When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown.
This job uses [samuelcolvin/check-python-version].
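You can run the same check locally before pushing the tag - a quick sketch using the paths named above:
```sh
# The version in the codebase...
grep __version__ invokeai/version/invokeai_version.py
# ...should match the tag you just created (minus the leading `v`)
git describe --tags --abbrev=0
```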
@@ -52,52 +43,62 @@ This job uses [samuelcolvin/check-python-version].
#### Check and Test Jobs
Next, these jobs run and must pass. They are the same jobs that are run for every PR.
- **`python-tests`**: runs `pytest` on matrix of platforms
- **`python-checks`**: runs `ruff` (format and lint)
- **`frontend-tests`**: runs `vitest`
- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
- **`typegen-checks`**: ensures the frontend and backend types are synced
> **TODO** We should add `mypy` or `pyright` to the **`check-python`** job.
> **TODO** We should add an end-to-end test job that generates an image.
#### `build-installer` Job
This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:
- **`dist`**: the python distribution, to be published on PyPI
- **`InvokeAI-installer-${VERSION}.zip`**: the legacy install scripts
You don't need to download either of these files.
> The legacy install scripts are no longer used, but we haven't updated the workflow to skip building them.
- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release
#### Sanity Check & Smoke Test
At this point, the release workflow pauses as the remaining publish jobs require approval.
At this point, the release workflow pauses as the remaining publish jobs require approval. Time to test the installer.
It's possible to test the python package before it gets published to PyPI. We've never had problems with it, so it's not necessary to do this.
Because the installer pulls from PyPI, and we haven't published to PyPI yet, you will need to install from the wheel:
But, if you want to be extra-super careful, here's how to test it:
- Download and unzip `dist.zip` and the installer from the **Summary** tab of the workflow
- Run the installer script using the `--wheel` CLI arg, pointing at the wheel:
- Download the `dist.zip` build artifact from the `build-installer` job
- Unzip it and find the wheel file
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/) - but instead of installing from PyPI, install from the wheel
- Test the app
```sh
./install.sh --wheel ../InvokeAI-4.0.0rc6-py3-none-any.whl
```
- Install to a temporary directory so you get the new user experience
- Download a model and generate
> The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with exactly the same installation as if the installer got the wheel from PyPI.
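A minimal sketch of that wheel install, reusing the example filename from above and a throwaway venv:
```sh
# Throwaway venv so you get the fresh-install experience
python -m venv ~/.smoke-test-invoke --prompt smoke-test
source ~/.smoke-test-invoke/bin/activate
# Install the wheel unzipped from the dist artifact
pip install ./InvokeAI-4.0.0rc6-py3-none-any.whl
# Run the app, download a model, generate
invokeai-web
```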
##### Something isn't right
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?) and start over.
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?).
Now you can start from the top:
- Fix the issues and PR the fixes per usual
- Get the PR approved and merged per usual
- Switch to `main` and pull in the fixes
- Run `make tag-release` to move the tag to `HEAD` (which has the fixes) and kick off the release workflow again
- Re-do the sanity check
#### PyPI Publish Jobs
The publish jobs will not run if any of the previous jobs fail.
The publish jobs will not run if any of the previous jobs fail.
They use [GitHub environments], which are configured as [trusted publishers] on PyPI.
Both jobs require a @hipsterusername or @psychedelicious to approve them from the workflow's **Summary** tab.
Both jobs require a maintainer to approve them from the workflow's **Summary** tab.
- Click the **Review deployments** button
- Select the environment (either `testpypi` or `pypi` - typically you select both)
- Select the environment (either `testpypi` or `pypi`)
- Click **Approve and deploy**
> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a follow-up release.
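A sketch of failing forward (the patch bump and GNU `sed` syntax here are illustrative):
```sh
# Bump the app version...
sed -i 's/__version__ = .*/__version__ = "5.7.1"/' invokeai/version/invokeai_version.py
git commit -am "bump version to v5.7.1"
# ...then tag again to kick off a fresh release workflow
make tag-release
```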
@@ -112,33 +113,46 @@ If there are no incidents, contact @hipsterusername or @lstein, who have owner a
Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release for some reason:
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release.
- Approve this publish job without approving the prod publish
- Let it finish
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/), making sure to use the Test PyPI index URL: `https://test.pypi.org/simple/`
- Test the app
If approved and successful, you could try out the test release like this:
```sh
# Create a new virtual environment
python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
# Activate it so the commands below run inside the venv
source ~/.test-invokeai-dist/bin/activate
# Install the distribution from Test PyPI (dependencies come from regular PyPI)
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ invokeai
# Run and test the app
invokeai-web
# Cleanup
deactivate
rm -rf ~/.test-invokeai-dist
```
#### `publish-pypi` Job
Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.
It's a good idea to wait to approve and run this job until you have the release notes ready!
## Publish the GitHub Release with installer
## Prep and publish the GitHub Release
Once the release is published to PyPI, it's time to publish the GitHub release.
1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
2. The **Generate release notes** button automatically inserts the changelog and new contributors. Make sure to select the correct tags for this release and the last stable release. GH often selects the wrong tags - do this manually.
3. Write the release notes, describing important changes. Contributions from community members should be shouted out. Use the GH-generated changelog to see all contributors. If there are Weblate translation updates, open that PR and shout out every person who contributed a translation.
4. Check **Set as a pre-release** if it's a pre-release.
5. Approve and wait for the `publish-pypi` job to finish if you haven't already.
6. Publish the GH release.
7. Post the release in Discord in the [releases](https://discord.com/channels/1020123559063990373/1149260708098359327) channel with abbreviated notes. For example:
> Invoke v5.7.0 (stable): <https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0>
>
> It's a pretty big one - Form Builder, Metadata Nodes (thanks @SkunkWorxDark!), and much more.
8. Right click the message in releases and copy the link to it. Then, post that link in the [new-release-discussion](https://discord.com/channels/1020123559063990373/1149506274971631688) channel. For example:
> Invoke v5.7.0 (stable): <https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248>
1. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
1. Use `scripts/get_external_contributions.py` to get a list of external contributions to shout out in the release notes.
1. Upload the zip file created by the **`build`** job into the Assets section of the release notes.
1. Check **Set as a pre-release** if it's a pre-release.
1. Check **Create a discussion for this release**.
1. Publish the release.
1. Announce the release in Discord.
> **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.
## Manual Build
The `build installer` workflow can be dispatched manually. This is useful to test the installer for a given branch or tag.
No checks are run; it just builds.
## Manual Release
@@ -146,10 +160,12 @@ The `release` workflow can be dispatched manually. You must dispatch the workflo
This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.
[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
[PyPI]: https://pypi.org/
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
[Test PyPI]: https://test.pypi.org/
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
[ncipollo/release-action]: https://github.com/ncipollo/release-action
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version


@@ -1,18 +1,26 @@
# FAQ
!!! info "How to Reinstall"
Many issues can be resolved by re-installing the application. You won't lose any data by re-installing. We suggest downloading the [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) and using it to re-install the application. Consult the [installer guide](./installation/installer.md) for more information.
When you run the installer, you'll have an option to select the version to install. If you aren't ready to upgrade, you can choose your current version to fix a broken install.
If the troubleshooting steps on this page don't get you up and running, please either [create an issue] or hop on [discord] for help.
## How to Install
Follow the [Quick Start guide](./installation/quick_start.md) to install Invoke.
You can download the latest installers [here](https://github.com/invoke-ai/InvokeAI/releases).
Note that any releases marked as _pre-release_ are in a beta state. You may experience some issues, but we appreciate your help testing those! For stable/reliable installations, please install the [latest release].
## Downloading models and using existing models
The Model Manager tab in the UI provides a few ways to install models, including using your already-downloaded models. You'll see a popup directing you there on first startup. For more information, see the [model install docs].
## Missing models after updating from v3
## Missing models after updating to v4
If you find some models are missing after updating from v3, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
If you find some models are missing after updating to v4, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
You can use the `Scan Folder` tab in the Model Manager UI to fix this. The models will either be in the old, now-unused `autoimport` folder, or your `models` folder.
@@ -29,27 +37,115 @@ Follow the same steps to scan and import the missing models.
## Slow generation
- Check the [system requirements] to ensure that your system is capable of generating images.
- Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
- Check that your generations are happening on your GPU (if you have one). Invoke will log what is being used for generation upon startup. If your GPU isn't used, re-install and ensure you select the appropriate GPU option.
- If you are on Windows with an Nvidia GPU, you may have exceeded your GPU's VRAM capacity and are triggering Nvidia's "sysmem fallback". There's a guide to opt out of this behaviour in the [Low-VRAM mode guide](./features/low-vram.md).
- Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest to not set this at all and instead let Invoke manage it.
- Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs with less than 16GB of VRAM. Even on a 24GB card, the default works well.
- Check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup. If your GPU isn't used, re-install to ensure the correct versions of torch get installed.
- If you are on Windows, you may have exceeded your GPU's VRAM capacity and are using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behaviour in the linked FAQ entry.
## Shared GPU Memory (Windows)
!!! tip "Nvidia GPUs with driver 536.40"
This only applies to current Nvidia cards with driver 536.40 or later, released in June 2023.
When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM.
When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash.
If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490).
Here's how to get the python path required in the linked guide:
- Run `invoke.bat`.
- Select option 2 for developer console.
- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first).
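Alternatively, you can ask the interpreter directly from the developer console - a one-liner that prints the same path:
```sh
python -c "import sys; print(sys.executable)"
```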
## Installer cannot find python (Windows)
Ensure that you checked **Add python.exe to PATH** when installing Python. This can be found at the bottom of the Python Installer window. If you already have Python installed, you can re-run the python installer, choose the Modify option and check the box.
## Triton error on startup
This can be safely ignored. Invoke doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
This can be safely ignored. InvokeAI doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
## Unable to Copy on Firefox
Firefox does not allow Invoke to directly access the clipboard by default. As a result, you may be unable to use certain copy functions. You can fix this by configuring Firefox to allow access to write to the clipboard:
- Go to `about:config` and click the Accept button
- Search for `dom.events.asyncClipboard.clipboardItem`
- Set it to `true` by clicking the toggle button
- Restart Firefox
## Updated to 3.4.0 and xformers can't load C++/CUDA
An issue occurred with your PyTorch update. Follow these steps to fix:
1. Launch your invoke.bat / invoke.sh and select the option to open the developer console
2. Run: `pip install ".[xformers]" --upgrade --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121`
- If you run into an error with `typing_extensions`, re-open the developer console and run: `pip install -U typing-extensions`
Note that v3.4.0 is an old, unsupported version. Please upgrade to the [latest release].
## Install failed and says `pip` is out of date
An out-of-date `pip` typically won't cause an installation to fail. The cause of the error can likely be found above the message that says `pip` is out of date.
If you saw that warning but the install went well, don't worry about it (but you can update `pip` afterwards if you'd like).
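If you'd like to update it anyway, run this from the developer console:
```sh
python -m pip install --upgrade pip
```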
## Replicate image found online
Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.
## OSErrors on Windows while installing dependencies
During a zip file installation or an update, installation stops with an error like this:
![broken-dependency-screenshot](./assets/troubleshooting/broken-dependency.png){:width="800px"}
To resolve this, re-install the application as described above.
## HuggingFace install failed due to invalid access token
Some HuggingFace models require you to authenticate using an [access token].
Invoke doesn't manage this token for you, but it's easy to set it up:
- Follow the instructions in the link above to create an access token. Copy it.
- Run the launcher script.
- Select option 2 (developer console).
- Paste the following command:
```sh
python -c "import huggingface_hub; huggingface_hub.login()"
```
- Paste your access token when prompted and press Enter. You won't see anything when you paste it.
- Type `n` if prompted about git credentials.
If you get an error, try the command again - maybe the token didn't paste correctly.
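To confirm the token was stored, you can ask the Hub who you are. A quick check using `huggingface_hub`'s `whoami` (already installed in an Invoke environment):
```sh
python -c "import huggingface_hub; print(huggingface_hub.whoami()['name'])"
```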
Once your token is set, start Invoke and try downloading the model again. The installer will automatically use the access token.
If the install still fails, you may not have access to the model.
## Stable Diffusion XL generation fails after trying to load UNet
InvokeAI is working in other respects, but when trying to generate
images with Stable Diffusion XL you get a "Server Error". The text log
in the launch window contains this log line above several more lines of
error messages:
`INFO --> Loading model:D:\LONG\PATH\TO\MODEL, type sdxl:main:unet`
This failure mode occurs when there is a network glitch during
downloading the very large SDXL model.
To address this, first go to the Model Manager and delete the
Stable-Diffusion-XL-base-1.X model. Then, click the HuggingFace tab,
paste the Repo ID stabilityai/stable-diffusion-xl-base-1.0 and install
the model.
## Package dependency conflicts during installation or update
If you have previously installed InvokeAI or another Stable Diffusion
package, the installer may occasionally pick up outdated libraries and
either the installer or `invoke` will fail with complaints about
library conflicts.
To resolve this, re-install the application as described above.
## Invalid configuration file
Everything seems to install OK, but you get a `ValidationError` when starting up the app.
@@ -58,9 +154,64 @@ This is caused by an invalid setting in the `invokeai.yaml` configuration file.
Check the [configuration docs] for more detail about the settings and how to specify them.
## Out of Memory Errors
The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Follow our [Low-VRAM mode guide](./features/low-vram.md) to configure Invoke to prevent these.
## `ModuleNotFoundError: No module named 'controlnet_aux'`
`controlnet_aux` is a dependency of Invoke and appears to have been packaged or distributed strangely. Sometimes, it doesn't install correctly. This is outside our control.
If you encounter this error, the solution is to remove the package from the `pip` cache and re-run the Invoke installer so a fresh, working version of `controlnet_aux` can be downloaded and installed:
- Run the Invoke launcher
- Choose the developer console option
- Run this command: `pip cache remove controlnet_aux`
- Close the terminal window
- Download and run the [installer][latest release], selecting your current install location
## Out of Memory Issues
The models are large, VRAM is expensive, and you may find yourself
faced with Out of Memory errors when generating images. Here are some
tips to reduce the problem:
!!! info "Optimizing for GPU VRAM"
=== "4GB VRAM GPU"
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
and derived models, provided that you do not use the NSFW checker. It won't be loaded unless you go into the UI settings and turn it on.
If you are on a CUDA-enabled GPU, we will automatically use xformers or torch-sdp to reduce VRAM requirements, though you can explicitly configure this. See the [configuration docs].
=== "6GB VRAM GPU"
This is a border case. Using the SD 1.5 series you should be able to
generate images up to 640x640 with the NSFW checker enabled, and up to
1024x1024 with it disabled.
If you run into persistent memory issues there are a series of
environment variables that you can set before launching InvokeAI that
alter how the PyTorch machine learning library manages memory. See
<https://pytorch.org/docs/stable/notes/cuda.html#memory-management> for
a list of these tweaks.
=== "12GB VRAM GPU"
This should be sufficient to generate larger images up to about 1280x1280.
## Checkpoint Models Load Slowly or Use Too Much RAM
The difference between diffusers models (a folder containing multiple
subfolders) and checkpoint models (a file ending with .safetensors or
.ckpt) is that InvokeAI is able to load diffusers models into memory
incrementally, while checkpoint models must be loaded all at
once. With very large models, or systems with limited RAM, you may
experience slowdowns and other memory-related issues when loading
checkpoint models.
To solve this, go to the Model Manager tab (the cube), select the
checkpoint model that's giving you trouble, and press the "Convert"
button in the upper right of your browser window. This will convert the
checkpoint into a diffusers model, after which loading should be
faster and less memory-intensive.
## Memory Leak (Linux)
@@ -102,6 +253,8 @@ Note the differences between memory allocated as chunks in an arena vs. memory a
[model install docs]: ./installation/models.md
[system requirements]: ./installation/requirements.md
[latest release]: https://github.com/invoke-ai/InvokeAI/releases/latest
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
[discord]: https://discord.gg/ZmtBAhwWhy
[configuration docs]: ./configuration.md
[access token]: https://huggingface.co/docs/hub/security-tokens#how-to-manage-user-access-tokens


@@ -31,7 +31,6 @@ It is possible to fine-tune the settings for best performance or if you still ge
Low-VRAM mode involves 4 features, each of which can be configured or fine-tuned:
- Partial model loading (`enable_partial_loading`)
- PyTorch CUDA allocator config (`pytorch_cuda_alloc_conf`)
- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
- Working memory (`device_working_mem_gb`)
- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)
@@ -52,16 +51,6 @@ As described above, you can enable partial model loading by adding this line to
enable_partial_loading: true
```
### PyTorch CUDA allocator config
The PyTorch CUDA allocator's behavior can be configured using the `pytorch_cuda_alloc_conf` config. Tuning the allocator configuration can help to reduce the peak reserved VRAM. The optimal configuration is dependent on many factors (e.g. device type, VRAM, CUDA driver version, etc.), but switching from PyTorch's native allocator to using CUDA's built-in allocator works well on many systems. To try this, add the following line to your `invokeai.yaml` file:
```yaml
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
```
A more complete explanation of the available configuration options is [here](https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf).
### Dynamic RAM and VRAM cache sizes
Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum.
@@ -86,26 +75,24 @@ But, if your GPU has enough VRAM to hold models fully, you might get a perf boos
# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB
# might be a good value to achieve aggressive model caching.
max_cache_ram_gb: 28
# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
# consideration the VRAM used by other processes).
# You can override the default value by setting `max_cache_vram_gb`.
# CAUTION: Most users should not manually set this value. See warning below.
max_cache_vram_gb: 16
# You can override the default value by setting `max_cache_vram_gb`. Note that this value takes precedence over the
# `device_working_mem_gb`.
# It is recommended to set the VRAM cache size to be as large as possible while leaving enough room for the working
# memory of the tasks you will be doing. For example, on a 24GB GPU that will be running unquantized FLUX without any
# auxiliary models, 18GB might be a good value.
max_cache_vram_gb: 18
```
!!! warning "Max safe value for `max_cache_vram_gb`"
!!! tip "Max safe value for `max_cache_vram_gb`"
Most users should not manually configure the `max_cache_vram_gb`. This configuration value takes precedence over the `device_working_mem_gb` and any operations that explicitly reserve additional working memory (e.g. VAE decode). As such, manually configuring it increases the likelihood of encountering out-of-memory errors.
For users who wish to configure `max_cache_vram_gb`, the max safe value can be determined by subtracting `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.
To determine the max safe value for `max_cache_vram_gb`, subtract `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.
For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.
If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.
Most users who override `max_cache_vram_gb` are doing so because they wish to use significantly less VRAM, and should be setting `max_cache_vram_gb` to a value significantly less than the 'max safe value'.
### Working memory
Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.


@@ -88,13 +88,13 @@ The following commands vary depending on the version of Invoke being installed a
8. Install the `invokeai` package. Substitute the package specifier and version.
```sh
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
uv pip install <PACKAGE_SPECIFIER>=<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
```
If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
```sh
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
uv pip install <PACKAGE_SPECIFIER>=<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
```
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
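For example, assuming the venv lives at `.venv` inside your install directory:
```sh
deactivate
source .venv/bin/activate      # Linux/macOS
# .venv\Scripts\activate       # Windows
```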


@@ -99,20 +99,6 @@ We recommend watching our [Getting Started Playlist](https://www.youtube.com/pla
- Using control layers and reference guides.
- Refining images with advanced workflows.
## Troubleshooting
If installation fails, retrying the install in Repair Mode may fix it. There's a checkbox to enable this on the Review step of the install flow.
If that doesn't fix it, [clearing the `uv` cache](https://docs.astral.sh/uv/reference/cli/#uv-cache-clean) might do the trick:
- Open and start the dev console (button at the bottom-left of the launcher).
- Run `uv cache clean`.
- Retry the installation. Enable Repair Mode for good measure.
If you are still unable to install, try installing to a different location and see if that works.
If you still have problems, ask for help on the Invoke [discord](https://discord.gg/ZmtBAhwWhy).
## Other Installation Methods
- You can install the Invoke application as a python package. See our [manual install](./manual.md) docs.


@@ -4,9 +4,7 @@ Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
## Hardware
Hardware requirements vary significantly depending on model and image output size.
The requirements below are rough guidelines for best performance. GPUs with less VRAM typically still work, if a bit slower. Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.
- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.


@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
from invokeai.app.services.boards.boards_common import BoardDTO
from invokeai.app.services.image_records.image_records_common import ImageCategory
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
@@ -88,9 +87,7 @@ async def delete_board(
try:
if include_images is True:
deleted_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id=board_id,
categories=None,
is_intermediate=None,
board_id=board_id
)
ApiDependencies.invoker.services.images.delete_images_on_board(board_id=board_id)
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
@@ -101,9 +98,7 @@ async def delete_board(
)
else:
deleted_board_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id=board_id,
categories=None,
is_intermediate=None,
board_id=board_id
)
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
return DeleteBoardResult(
@@ -147,14 +142,10 @@ async def list_boards(
)
async def list_all_board_image_names(
board_id: str = Path(description="The id of the board"),
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
) -> list[str]:
"""Gets a list of images for a board"""
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id,
categories,
is_intermediate,
)
return image_names


@@ -858,18 +858,6 @@ async def get_stats() -> Optional[CacheStats]:
return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats
@model_manager_router.post(
"/empty_model_cache",
operation_id="empty_model_cache",
status_code=200,
)
async def empty_model_cache() -> None:
"""Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped."""
# Request 1000GB of room in order to force the cache to drop all models.
ApiDependencies.invoker.services.logger.info("Emptying model cache.")
ApiDependencies.invoker.services.model_manager.load.ram_cache.make_room(1000 * 2**30)
class HFTokenStatus(str, Enum):
VALID = "valid"
INVALID = "invalid"


@@ -10,13 +10,11 @@ from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
Batch,
BatchStatus,
CancelAllExceptCurrentResult,
CancelByBatchIDsResult,
CancelByDestinationResult,
ClearResult,
EnqueueBatchResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
@@ -48,9 +46,7 @@ async def enqueue_batch(
) -> EnqueueBatchResult:
"""Processes a batch and enqueues the output graphs for execution."""
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
queue_id=queue_id, batch=batch, prepend=prepend
)
return ApiDependencies.invoker.services.session_queue.enqueue_batch(queue_id=queue_id, batch=batch, prepend=prepend)
@session_queue_router.get(
@@ -98,18 +94,6 @@ async def Pause(
return ApiDependencies.invoker.services.session_processor.pause()
@session_queue_router.put(
"/{queue_id}/cancel_all_except_current",
operation_id="cancel_all_except_current",
responses={200: {"model": CancelAllExceptCurrentResult}},
)
async def cancel_all_except_current(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> CancelAllExceptCurrentResult:
"""Immediately cancels all queue items except in-processing items"""
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
@session_queue_router.put(
"/{queue_id}/cancel_by_batch_ids",
operation_id="cancel_by_batch_ids",
@@ -138,19 +122,6 @@ async def cancel_by_destination(
)
@session_queue_router.put(
"/{queue_id}/retry_items_by_id",
operation_id="retry_items_by_id",
responses={200: {"model": RetryItemsResult}},
)
async def retry_items_by_id(
queue_id: str = Path(description="The queue id to perform this operation on"),
item_ids: list[int] = Body(description="The queue item ids to retry"),
) -> RetryItemsResult:
"""Immediately cancels all queue items with the given origin"""
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
@session_queue_router.put(
"/{queue_id}/clear",
operation_id="clear",


@@ -1,8 +1,12 @@
import asyncio
import logging
import mimetypes
import socket
from contextlib import asynccontextmanager
from pathlib import Path
import torch
import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
@@ -11,7 +15,11 @@ from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from torch.backends.mps import is_available as is_mps_available
# for PyCharm:
# noinspection PyUnresolvedReferences
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
import invokeai.frontend.web as web_dir
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
@@ -30,13 +38,31 @@ from invokeai.app.api.routers import (
from invokeai.app.api.sockets import SocketIO
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.custom_openapi import get_openapi_func
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger
app_config = get_config()
if is_mps_available():
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
logger = InvokeAILogger.get_logger(config=app_config)
# fix for windows mimetypes registry entries being borked
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
torch_device_name = TorchDevice.get_torch_device_name()
logger.info(f"Using torch device: {torch_device_name}")
loop = asyncio.new_event_loop()
# We may change the port if the default is in use, this global variable is used to store the port so that we can log
# the correct port when the server starts in the lifespan handler.
port = app_config.port
@asynccontextmanager
async def lifespan(app: FastAPI):
@@ -45,7 +71,7 @@ async def lifespan(app: FastAPI):
# Log the server address when it starts - in case the network log level is not high enough to see the startup log
proto = "https" if app_config.ssl_certfile else "http"
msg = f"Invoke running on {proto}://{app_config.host}:{app_config.port} (Press CTRL+C to quit)"
msg = f"Invoke running on {proto}://{app_config.host}:{port} (Press CTRL+C to quit)"
# Logging this way ignores the logger's log level and _always_ logs the message
record = logger.makeRecord(
@@ -160,3 +186,73 @@ except RuntimeError:
app.mount(
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
) # docs favicon is in here
def check_cudnn(logger: logging.Logger) -> None:
"""Check for cuDNN issues that could be causing degraded performance."""
if torch.backends.cudnn.is_available():
try:
# Note: At the time of writing (torch 2.2.1), torch.backends.cudnn.version() only raises an error the first
# time it is called. Subsequent calls will return the version number without complaining about a mismatch.
cudnn_version = torch.backends.cudnn.version()
logger.info(f"cuDNN version: {cudnn_version}")
except RuntimeError as e:
logger.warning(
"Encountered a cuDNN version issue. This may result in degraded performance. This issue is usually "
"caused by an incompatible cuDNN version installed in your python environment, or on the host "
f"system. Full error message:\n{e}"
)
def invoke_api() -> None:
def find_port(port: int) -> int:
"""Find a port not in use starting at given port"""
# Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
# https://github.com/WaylonWalker
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
if s.connect_ex(("localhost", port)) == 0:
return find_port(port=port + 1)
else:
return port
if app_config.dev_reload:
try:
import jurigged
except ImportError as e:
logger.error(
'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.',
exc_info=e,
)
else:
jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)
global port
port = find_port(app_config.port)
if port != app_config.port:
logger.warn(f"Port {app_config.port} in use, using port {port}")
check_cudnn(logger)
config = uvicorn.Config(
app=app,
host=app_config.host,
port=port,
loop="asyncio",
log_level=app_config.log_level_network,
ssl_certfile=app_config.ssl_certfile,
ssl_keyfile=app_config.ssl_keyfile,
)
server = uvicorn.Server(config)
# replace uvicorn's loggers with InvokeAI's for consistent appearance
uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
uvicorn_logger.handlers.clear()
for hdlr in logger.handlers:
uvicorn_logger.addHandler(hdlr)
loop.run_until_complete(server.serve())
if __name__ == "__main__":
invoke_api()


@@ -1,5 +1,33 @@
import shutil
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from invokeai.app.services.config.config_default import get_config
custom_nodes_path = Path(get_config().custom_nodes_path)
custom_nodes_path.mkdir(parents=True, exist_ok=True)
custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")
# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
p.chmod(custom_nodes_path.stat().st_mode)
# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
# add core nodes to __all__
python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
__all__ = [f.stem for f in python_files] # type: ignore


@@ -44,6 +44,8 @@ if TYPE_CHECKING:
logger = InvokeAILogger.get_logger()
CUSTOM_NODE_PACK_SUFFIX = "__invokeai-custom-node"
class InvalidVersionError(ValueError):
pass
@@ -238,11 +240,6 @@ class BaseInvocation(ABC, BaseModel):
"""Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
return signature(cls.invoke).return_annotation
@classmethod
def get_invocation_for_type(cls, invocation_type: str) -> BaseInvocation | None:
"""Gets the invocation class for a given invocation type."""
return cls.get_invocations_map().get(invocation_type)
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
"""Adds various UI-facing attributes to the invocation's OpenAPI schema."""
@@ -449,27 +446,8 @@ def invocation(
if re.compile(r"^\S+$").match(invocation_type) is None:
raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"')
# The node pack is the module name - will be "invokeai" for built-in nodes
node_pack = cls.__module__.split(".")[0]
# Handle the case where an existing node is being clobbered by the one we are registering
if invocation_type in BaseInvocation.get_invocation_types():
clobbered_invocation = BaseInvocation.get_invocation_for_type(invocation_type)
# This should always be true - we just checked if the invocation type was in the set
assert clobbered_invocation is not None
clobbered_node_pack = clobbered_invocation.UIConfig.node_pack
if clobbered_node_pack == "invokeai":
# The node being clobbered is a core node
raise ValueError(
f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a core node with the same type already exists'
)
else:
# The node being clobbered is a custom node
raise ValueError(
f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a node with the same type already exists in node pack "{clobbered_node_pack}"'
)
raise ValueError(f'Invocation type "{invocation_type}" already exists')
validate_fields(cls.model_fields, invocation_type)
@@ -479,7 +457,8 @@ def invocation(
uiconfig["tags"] = tags
uiconfig["category"] = category
uiconfig["classification"] = classification
uiconfig["node_pack"] = node_pack
# The node pack is the module name - will be "invokeai" for built-in nodes
uiconfig["node_pack"] = cls.__module__.split(".")[0]
if version is not None:
try:


@@ -64,50 +64,13 @@ class ImageBatchInvocation(BaseBatchInvocation):
"""Create a batched generation, where the workflow is executed once for each image in the batch."""
images: list[ImageField] = InputField(
default=[],
min_length=1,
description="The images to batch over",
default=[], min_length=1, description="The images to batch over", input=Input.Direct
)
def invoke(self, context: InvocationContext) -> ImageOutput:
raise NotExecutableNodeError()
@invocation_output("image_generator_output")
class ImageGeneratorOutput(BaseInvocationOutput):
"""Base class for nodes that output a collection of boards"""
images: list[ImageField] = OutputField(description="The generated images")
class ImageGeneratorField(BaseModel):
pass
@invocation(
"image_generator",
title="Image Generator",
tags=["primitives", "board", "image", "batch", "special"],
category="primitives",
version="1.0.0",
classification=Classification.Special,
)
class ImageGenerator(BaseInvocation):
"""Generated a collection of images for use in a batched generation"""
generator: ImageGeneratorField = InputField(
description="The image generator.",
input=Input.Direct,
title="Generator Type",
)
def __init__(self):
raise NotExecutableNodeError()
def invoke(self, context: InvocationContext) -> ImageGeneratorOutput:
raise NotExecutableNodeError()
@invocation(
"string_batch",
title="String Batch",
@@ -120,9 +83,7 @@ class StringBatchInvocation(BaseBatchInvocation):
"""Create a batched generation, where the workflow is executed once for each string in the batch."""
strings: list[str] = InputField(
default=[],
min_length=1,
description="The strings to batch over",
default=[], min_length=1, description="The strings to batch over", input=Input.Direct
)
def invoke(self, context: InvocationContext) -> StringOutput:


@@ -10,12 +10,10 @@ from pathlib import Path
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
loaded_packs: list[str] = []
failed_packs: list[str] = []
loaded_count = 0
custom_nodes_dir = Path(__file__).parent
for d in custom_nodes_dir.iterdir():
for d in Path(__file__).parent.iterdir():
# skip files
if not d.is_dir():
continue
@@ -49,16 +47,12 @@ for d in custom_nodes_dir.iterdir():
sys.modules[spec.name] = module
spec.loader.exec_module(module)
loaded_packs.append(module_name)
loaded_count += 1
except Exception:
failed_packs.append(module_name)
full_error = traceback.format_exc()
logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
logger.error(f"Failed to load node pack {module_name}:\n{full_error}")
del init, module_name
loaded_count = len(loaded_packs)
if loaded_count > 0:
logger.info(
f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_dir}: {', '.join(loaded_packs)}"
)
logger.info(f"Loaded {loaded_count} node packs from {Path(__file__).parent}")


@@ -898,7 +898,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
### inpaint
mask, masked_latents, is_gradient_mask = self.prep_inpaint_mask(context, latents)
# NOTE: We used to identify inpainting models by inspecting the shape of the loaded UNet model weights. Now we
# NOTE: We used to identify inpainting models by inpecting the shape of the loaded UNet model weights. Now we
# use the ModelVariantType config. During testing, there was a report of a user with models that had an
# incorrect ModelVariantType value. Re-installing the model fixed the issue. If this issue turns out to be
# prevalent, we will have to revisit how we initialize the inpainting extensions.


@@ -300,13 +300,6 @@ class BoundingBoxField(BaseModel):
raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
return self
def tuple(self) -> Tuple[int, int, int, int]:
"""
Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.
This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
"""
return (self.x_min, self.y_min, self.x_max, self.y_max)
class MetadataField(RootModel[dict[str, Any]]):
"""


@@ -8,7 +8,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import BaseModelType
@@ -21,9 +21,6 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
default=None, description=FieldDescriptions.transformer, title="FLUX Transformer"
)
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
t5_encoder: Optional[T5EncoderField] = OutputField(
default=None, description=FieldDescriptions.t5_encoder, title="T5 Encoder"
)
@invocation(
@@ -31,7 +28,7 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
title="FLUX LoRA",
tags=["lora", "model", "flux"],
category="model",
version="1.2.0",
version="1.1.0",
classification=Classification.Prototype,
)
class FluxLoRALoaderInvocation(BaseInvocation):
@@ -53,12 +50,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
description=FieldDescriptions.clip,
input=Input.Connection,
)
t5_encoder: T5EncoderField | None = InputField(
default=None,
title="T5 Encoder",
description=FieldDescriptions.t5_encoder,
input=Input.Connection,
)
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
lora_key = self.lora.key
@@ -71,8 +62,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
raise ValueError(f'LoRA "{lora_key}" already applied to transformer.')
if self.clip and any(lora.lora.key == lora_key for lora in self.clip.loras):
raise ValueError(f'LoRA "{lora_key}" already applied to CLIP encoder.')
if self.t5_encoder and any(lora.lora.key == lora_key for lora in self.t5_encoder.loras):
raise ValueError(f'LoRA "{lora_key}" already applied to T5 encoder.')
output = FluxLoRALoaderOutput()
@@ -93,14 +82,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
weight=self.weight,
)
)
if self.t5_encoder is not None:
output.t5_encoder = self.t5_encoder.model_copy(deep=True)
output.t5_encoder.loras.append(
LoRAField(
lora=self.lora,
weight=self.weight,
)
)
return output
@@ -110,14 +91,14 @@ class FluxLoRALoaderInvocation(BaseInvocation):
title="FLUX LoRA Collection Loader",
tags=["lora", "model", "flux"],
category="model",
version="1.3.0",
version="1.1.0",
classification=Classification.Prototype,
)
class FLUXLoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to a FLUX transformer."""
loras: Optional[LoRAField | list[LoRAField]] = InputField(
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
transformer: Optional[TransformerField] = InputField(
@@ -132,30 +113,13 @@ class FLUXLoRACollectionLoader(BaseInvocation):
description=FieldDescriptions.clip,
input=Input.Connection,
)
t5_encoder: T5EncoderField | None = InputField(
default=None,
title="T5 Encoder",
description=FieldDescriptions.t5_encoder,
input=Input.Connection,
)
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
output = FluxLoRALoaderOutput()
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []
if self.transformer is not None:
output.transformer = self.transformer.model_copy(deep=True)
if self.clip is not None:
output.clip = self.clip.model_copy(deep=True)
if self.t5_encoder is not None:
output.t5_encoder = self.t5_encoder.model_copy(deep=True)
for lora in loras:
if lora is None:
continue
if lora.lora.key in added_loras:
continue
@@ -166,13 +130,14 @@ class FLUXLoRACollectionLoader(BaseInvocation):
added_loras.append(lora.lora.key)
if self.transformer is not None and output.transformer is not None:
if self.transformer is not None:
if output.transformer is None:
output.transformer = self.transformer.model_copy(deep=True)
output.transformer.loras.append(lora)
if self.clip is not None and output.clip is not None:
if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)
if self.t5_encoder is not None and output.t5_encoder is not None:
output.t5_encoder.loras.append(lora)
return output

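One side of the collection-loader hunks above copies the target fields eagerly before the loop; the other defers the deep copy until a LoRA is actually applied, and both de-duplicate by LoRA key. A minimal sketch of the deferred variant, with hypothetical Lora/Target stand-ins for the real fields:

import copy
from dataclasses import dataclass, field

@dataclass
class Lora:
    key: str
    weight: float = 1.0

@dataclass
class Target:
    loras: list[Lora] = field(default_factory=list)

def apply_collection(loras: list[Lora], target: Target | None) -> Target | None:
    output: Target | None = None
    added: list[str] = []
    for lora in loras:
        if lora.key in added:
            continue  # each LoRA key is applied at most once
        added.append(lora.key)
        if target is not None:
            if output is None:
                output = copy.deepcopy(target)  # copy only when actually needed
            output.loras.append(lora)
    return output

result = apply_collection([Lora("a"), Lora("a"), Lora("b")], Target())
assert result is not None and [lora.key for lora in result.loras] == ["a", "b"]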
View File

@@ -40,7 +40,7 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
title="Flux Main Model",
tags=["model", "flux"],
category="model",
version="1.0.5",
version="1.0.4",
classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
@@ -87,7 +87,7 @@ class FluxModelLoaderInvocation(BaseInvocation):
return FluxModelLoaderOutput(
transformer=TransformerField(transformer=transformer, loras=[]),
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder, loras=[]),
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
vae=VAEField(vae=vae),
max_seq_len=max_seq_lengths[transformer_config.config_path],
)

View File

@@ -19,7 +19,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.patches.layer_patcher import LayerPatcher
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
@@ -71,45 +71,13 @@ class FluxTextEncoderInvocation(BaseInvocation):
def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
prompt = [self.prompt]
t5_encoder_info = context.models.load(self.t5_encoder.text_encoder)
t5_encoder_config = t5_encoder_info.config
assert t5_encoder_config is not None
with (
t5_encoder_info.model_on_device() as (cached_weights, t5_text_encoder),
context.models.load(self.t5_encoder.text_encoder) as t5_text_encoder,
context.models.load(self.t5_encoder.tokenizer) as t5_tokenizer,
ExitStack() as exit_stack,
):
assert isinstance(t5_text_encoder, T5EncoderModel)
assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))
# Determine if the model is quantized.
# If the model is quantized, then we need to apply the LoRA weights as sidecar layers. This results in
# slower inference than direct patching, but is agnostic to the quantization format.
if t5_encoder_config.format in [ModelFormat.T5Encoder, ModelFormat.Diffusers]:
model_is_quantized = False
elif t5_encoder_config.format in [
ModelFormat.BnbQuantizedLlmInt8b,
ModelFormat.BnbQuantizednf4b,
ModelFormat.GGUFQuantized,
]:
model_is_quantized = True
else:
raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")
# Apply LoRA models to the T5 encoder.
# Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
exit_stack.enter_context(
LayerPatcher.apply_smart_model_patches(
model=t5_text_encoder,
patches=self._t5_lora_iterator(context),
prefix=FLUX_LORA_T5_PREFIX,
dtype=t5_text_encoder.dtype,
cached_weights=cached_weights,
force_sidecar_patching=model_is_quantized,
)
)
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
context.util.signal_progress("Running T5 encoder")
@@ -164,10 +132,3 @@ class FluxTextEncoderInvocation(BaseInvocation):
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info
def _t5_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in self.t5_encoder.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info

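The removed block picks between direct weight patching and sidecar layers purely from the model format: quantized checkpoints get sidecar patches, which are slower but agnostic to the quantization scheme. A minimal sketch of that dispatch, with a hypothetical enum standing in for ModelFormat:

from enum import Enum, auto

class Format(Enum):
    T5_ENCODER = auto()
    DIFFUSERS = auto()
    BNB_INT8 = auto()
    BNB_NF4 = auto()
    GGUF = auto()

def is_quantized(fmt: Format) -> bool:
    # Unquantized formats can be patched in place; quantized ones force
    # sidecar patching because the packed weights can't be edited directly.
    if fmt in (Format.T5_ENCODER, Format.DIFFUSERS):
        return False
    if fmt in (Format.BNB_INT8, Format.BNB_NF4, Format.GGUF):
        return True
    raise ValueError(f"Unsupported model format: {fmt}")

assert is_quantized(Format.GGUF) and not is_quantized(Format.DIFFUSERS)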
View File

@@ -41,11 +41,16 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision).
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
scaling_constant = 1090 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
# We add a 20% buffer to the working memory estimate to be safe.
working_memory = working_memory * 1.2
return int(working_memory)
def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:

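Both sides of the hunk use the same shape for the estimate, linear in output pixels and element size; only the experimentally determined constant and the 20% buffer differ. A worked sketch using the 1090 constant, assuming the usual 8x latent-to-pixel scale factor:

LATENT_SCALE_FACTOR = 8  # assumption: 8x spatial upscale from latents to pixels

def estimate_working_memory(latent_h: int, latent_w: int, element_size: int,
                            scaling_constant: int = 1090) -> int:
    out_h = LATENT_SCALE_FACTOR * latent_h
    out_w = LATENT_SCALE_FACTOR * latent_w
    working_memory = out_h * out_w * element_size * scaling_constant
    return int(working_memory * 1.2)  # 20% safety buffer

# 128x128 latents decoded at 2 bytes/element: ~2.55 GiB estimated.
print(estimate_working_memory(128, 128, 2) / 2**30)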
View File

@@ -21,7 +21,7 @@ class IdealSizeOutput(BaseInvocationOutput):
"ideal_size",
title="Ideal Size",
tags=["latents", "math", "ideal_size"],
version="1.0.4",
version="1.0.3",
)
class IdealSizeInvocation(BaseInvocation):
"""Calculates the ideal size for generation to avoid duplication"""
@@ -41,16 +41,11 @@ class IdealSizeInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> IdealSizeOutput:
unet_config = context.models.get_config(self.unet.unet.key)
aspect = self.width / self.height
if unet_config.base == BaseModelType.StableDiffusion1:
dimension = 512
elif unet_config.base == BaseModelType.StableDiffusion2:
dimension: float = 512
if unet_config.base == BaseModelType.StableDiffusion2:
dimension = 768
elif unet_config.base in (BaseModelType.StableDiffusionXL, BaseModelType.Flux, BaseModelType.StableDiffusion3):
elif unet_config.base == BaseModelType.StableDiffusionXL:
dimension = 1024
else:
raise ValueError(f"Unsupported model type: {unet_config.base}")
dimension = dimension * self.multiplier
min_dimension = math.floor(dimension * 0.5)
model_area = dimension * dimension # hardcoded for now since all models are trained on square images

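The sizing rule above starts from each base model's native training resolution, scales it by the multiplier, and targets the resulting square area. A condensed sketch (the base-model strings are illustrative):

import math

def ideal_size_params(base: str, multiplier: float = 1.0) -> tuple[int, float]:
    dimension: float = 512
    if base == "sd-2":
        dimension = 768
    elif base == "sdxl":
        dimension = 1024
    dimension = dimension * multiplier
    min_dimension = math.floor(dimension * 0.5)
    # Square target area, since the models are trained on square images.
    return min_dimension, dimension * dimension

assert ideal_size_params("sdxl") == (512, 1024 * 1024)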
View File

@@ -13,7 +13,6 @@ from invokeai.app.invocations.baseinvocation import (
)
from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import (
BoundingBoxField,
ColorField,
FieldDescriptions,
ImageField,
@@ -843,7 +842,7 @@ CHANNEL_FORMATS = {
"value",
],
category="image",
version="1.2.3",
version="1.2.2",
)
class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Add or subtract a value from a specific color channel of an image."""
@@ -853,22 +852,18 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
offset: int = InputField(default=0, ge=-255, le=255, description="The amount to adjust the channel by")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGBA")
pil_image = context.images.get_pil(self.image.image_name)
# extract the channel and mode from the input and reference tuple
mode = CHANNEL_FORMATS[self.channel][0]
channel_number = CHANNEL_FORMATS[self.channel][1]
# Convert PIL image to new format
converted_image = numpy.array(image.convert(mode)).astype(int)
converted_image = numpy.array(pil_image.convert(mode)).astype(int)
image_channel = converted_image[:, :, channel_number]
if self.channel == "Hue (HSV)":
# loop around the values because hue is special
image_channel = (image_channel + self.offset) % 256
else:
# Adjust the value, clipping to 0..255
image_channel = numpy.clip(image_channel + self.offset, 0, 255)
# Adjust the value, clipping to 0..255
image_channel = numpy.clip(image_channel + self.offset, 0, 255)
# Put the channel back into the image
converted_image[:, :, channel_number] = image_channel
@@ -876,10 +871,6 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
# Convert back to RGBA format and output
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")
# restore the alpha channel
if self.channel != "Alpha (RGBA)":
pil_image.putalpha(image.getchannel("A"))
image_dto = context.images.save(image=pil_image)
return ImageOutput.build(image_dto)
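One branch above treats hue as circular, wrapping offsets modulo 256 instead of clipping, so a shift past 255 lands back near 0. A small numpy illustration of the two behaviors:

import numpy as np

channel = np.array([10, 200, 250], dtype=int)
offset = 20

wrapped = (channel + offset) % 256            # hue wraps: 250 + 20 -> 14
clipped = np.clip(channel + offset, 0, 255)   # other channels clip: 250 + 20 -> 255

assert wrapped.tolist() == [30, 220, 14]
assert clipped.tolist() == [30, 220, 255]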
@@ -907,7 +898,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
"value",
],
category="image",
version="1.2.3",
version="1.2.2",
)
class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Scale a specific color channel of an image."""
@@ -918,14 +909,14 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
invert_channel: bool = InputField(default=False, description="Invert the channel after scaling")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGBA")
pil_image = context.images.get_pil(self.image.image_name)
# extract the channel and mode from the input and reference tuple
mode = CHANNEL_FORMATS[self.channel][0]
channel_number = CHANNEL_FORMATS[self.channel][1]
# Convert PIL image to new format
converted_image = numpy.array(image.convert(mode)).astype(float)
converted_image = numpy.array(pil_image.convert(mode)).astype(float)
image_channel = converted_image[:, :, channel_number]
# Adjust the value, clipping to 0..255
@@ -941,10 +932,6 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
# Convert back to RGBA format and output
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")
# restore the alpha channel
if self.channel != "Alpha (RGBA)":
pil_image.putalpha(image.getchannel("A"))
image_dto = context.images.save(image=pil_image)
return ImageOutput.build(image_dto)
@@ -1010,10 +997,10 @@ class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):
@invocation(
"mask_from_id",
title="Mask from Segmented Image",
title="Mask from ID",
tags=["image", "mask", "id"],
category="image",
version="1.0.1",
version="1.0.0",
)
class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generate a mask for a particular color in an ID Map"""
@@ -1023,24 +1010,40 @@ class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
threshold: int = InputField(default=100, description="Threshold for color detection")
invert: bool = InputField(default=False, description="Whether or not to invert the mask")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, mode="RGBA")
def rgba_to_hex(self, rgba_color: tuple[int, int, int, int]):
r, g, b, a = rgba_color
hex_code = "#{:02X}{:02X}{:02X}{:02X}".format(r, g, b, int(a * 255))
return hex_code
np_color = numpy.array(self.color.tuple())
def id_to_mask(self, id_mask: Image.Image, color: tuple[int, int, int, int], threshold: int = 100):
if id_mask.mode != "RGB":
id_mask = id_mask.convert("RGB")
# We could use the tuple directly, but rgba_to_hex is left here
# in case anyone prefers using hex codes instead of the color picker
hex_color_str = self.rgba_to_hex(color)
rgb_color = numpy.array([int(hex_color_str[i : i + 2], 16) for i in (1, 3, 5)])
# Maybe there's a faster way to calculate this distance but I can't think of any right now.
color_distance = numpy.linalg.norm(image - np_color, axis=-1)
color_distance = numpy.linalg.norm(id_mask - rgb_color, axis=-1)
# Create a mask based on the threshold and the distance calculated above
binary_mask = (color_distance < self.threshold).astype(numpy.uint8) * 255
binary_mask = (color_distance < threshold).astype(numpy.uint8) * 255
# Convert the mask back to PIL
binary_mask_pil = Image.fromarray(binary_mask)
if self.invert:
binary_mask_pil = ImageOps.invert(binary_mask_pil)
return binary_mask_pil
image_dto = context.images.save(image=binary_mask_pil, image_category=ImageCategory.MASK)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)
mask = self.id_to_mask(image, self.color.tuple(), self.threshold)
if self.invert:
mask = ImageOps.invert(mask)
image_dto = context.images.save(image=mask, image_category=ImageCategory.MASK)
return ImageOutput.build(image_dto)
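The id_to_mask helper builds the mask from per-pixel Euclidean distance to the target color, thresholded and scaled to 0/255. A standalone numpy/PIL sketch of that thresholding, assuming an RGB id map:

import numpy as np
from PIL import Image

def color_mask(id_map: Image.Image, color: tuple[int, int, int], threshold: int = 100) -> Image.Image:
    rgb = np.array(id_map.convert("RGB"), dtype=float)
    target = np.array(color, dtype=float)
    distance = np.linalg.norm(rgb - target, axis=-1)  # per-pixel color distance
    binary = (distance < threshold).astype(np.uint8) * 255
    return Image.fromarray(binary)  # single-channel mask

mask = color_mask(Image.new("RGB", (8, 8), (30, 30, 30)), (30, 30, 30))
assert mask.getpixel((0, 0)) == 255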
@@ -1151,59 +1154,3 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=noisy_image)
return ImageOutput.build(image_dto)
@invocation(
"crop_image_to_bounding_box",
title="Crop Image to Bounding Box",
category="image",
version="1.0.0",
tags=["image", "crop"],
classification=Classification.Beta,
)
class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels."""
image: ImageField = InputField(description="The image to crop")
bounding_box: BoundingBoxField | None = InputField(
default=None, description="The bounding box to crop the image to"
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)
bounding_box = self.bounding_box.tuple() if self.bounding_box is not None else image.getbbox()
cropped_image = image.crop(bounding_box)
image_dto = context.images.save(image=cropped_image)
return ImageOutput.build(image_dto)
@invocation(
"paste_image_into_bounding_box",
title="Paste Image into Bounding Box",
category="image",
version="1.0.0",
tags=["image", "crop"],
classification=Classification.Beta,
)
class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Paste the source image into the target image at the given bounding box.
The source image must be the same size as the bounding box, and the bounding box must fit within the target image."""
source_image: ImageField = InputField(description="The image to paste")
target_image: ImageField = InputField(description="The image to paste into")
bounding_box: BoundingBoxField = InputField(description="The bounding box to paste the image into")
def invoke(self, context: InvocationContext) -> ImageOutput:
source_image = context.images.get_pil(self.source_image.image_name, mode="RGBA")
target_image = context.images.get_pil(self.target_image.image_name, mode="RGBA")
bounding_box = self.bounding_box.tuple()
target_image.paste(source_image, bounding_box, source_image)
image_dto = context.images.save(image=target_image)
return ImageOutput.build(image_dto)

View File

@@ -60,7 +60,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision). This estimate is accurate for both SD1 and SDXL.
element_size = 4 if self.fp32 else 2
scaling_constant = 2200 # Determined experimentally.
scaling_constant = 960 # Determined experimentally.
if use_tiling:
tile_size = self.tile_size
@@ -84,7 +84,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20
return int(working_memory)
# We add 20% to the working memory estimate to be safe.
working_memory = int(working_memory * 1.2)
return working_memory
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:

View File

@@ -1,40 +0,0 @@
import shutil
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
def load_custom_nodes(custom_nodes_path: Path):
"""
Loads all custom nodes from the custom_nodes_path directory.
This function copies a custom __init__.py file to the custom_nodes_path directory, effectively turning it into a
python module.
The custom __init__.py file itself imports all the custom node packs as python modules from the custom_nodes_path
directory.
Then, the custom __init__.py file is programmatically imported using importlib. As it executes, it imports all the
custom node packs as python modules.
"""
custom_nodes_path.mkdir(parents=True, exist_ok=True)
custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")
# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
p.chmod(custom_nodes_path.stat().st_mode)
# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)

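The deleted loader follows importlib's documented recipe for importing a source file programmatically. Its core, reduced to a self-contained sketch (the path is hypothetical):

import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

init_path = Path("/tmp/custom_nodes/__init__.py")  # hypothetical location

spec = spec_from_file_location("custom_nodes", init_path)
if spec is None or spec.loader is None:
    raise RuntimeError(f"Could not load custom nodes from {init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module   # register first so imports inside the module resolve
spec.loader.exec_module(module)   # executes the module body, importing each node pack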
View File

@@ -2,22 +2,9 @@ import numpy as np
import torch
from PIL import Image
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
Classification,
InvocationContext,
invocation,
)
from invokeai.app.invocations.fields import (
BoundingBoxField,
ColorField,
ImageField,
InputField,
TensorField,
WithBoard,
WithMetadata,
)
from invokeai.app.invocations.primitives import BoundingBoxOutput, ImageOutput, MaskOutput
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, InvocationContext, invocation
from invokeai.app.invocations.fields import ImageField, InputField, TensorField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput, MaskOutput
from invokeai.backend.image_util.util import pil_to_np
@@ -86,7 +73,7 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
title="Invert Tensor Mask",
tags=["conditioning"],
category="conditioning",
version="1.1.0",
version="1.0.0",
classification=Classification.Beta,
)
class InvertTensorMaskInvocation(BaseInvocation):
@@ -96,15 +83,6 @@ class InvertTensorMaskInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> MaskOutput:
mask = context.tensors.load(self.mask.tensor_name)
# Verify dtype and shape.
assert mask.dtype == torch.bool
assert mask.dim() in [2, 3]
# Unsqueeze the channel dimension if it is missing. The MaskOutput type expects a single channel.
if mask.dim() == 2:
mask = mask.unsqueeze(0)
inverted = ~mask
return MaskOutput(
@@ -223,48 +201,3 @@ class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=masked_image)
return ImageOutput.build(image_dto)
WHITE = ColorField(r=255, g=255, b=255, a=255)
@invocation(
"get_image_mask_bounding_box",
title="Get Image Mask Bounding Box",
tags=["mask"],
category="mask",
version="1.0.0",
classification=Classification.Beta,
)
class GetMaskBoundingBoxInvocation(BaseInvocation):
"""Gets the bounding box of the given mask image."""
mask: ImageField = InputField(description="The mask to crop.")
margin: int = InputField(default=0, description="Margin to add to the bounding box.")
mask_color: ColorField = InputField(default=WHITE, description="Color of the mask in the image.")
def invoke(self, context: InvocationContext) -> BoundingBoxOutput:
mask = context.images.get_pil(self.mask.image_name, mode="RGBA")
mask_np = np.array(mask)
# Convert mask_color to RGBA tuple
mask_color_rgb = self.mask_color.tuple()
# Find the bounding box of the mask color
y, x = np.where(np.all(mask_np == mask_color_rgb, axis=-1))
if len(x) == 0 or len(y) == 0:
# No pixels found with the given color
return BoundingBoxOutput(bounding_box=BoundingBoxField(x_min=0, y_min=0, x_max=0, y_max=0))
left, upper, right, lower = x.min(), y.min(), x.max(), y.max()
# Add the margin
left = max(0, left - self.margin)
upper = max(0, upper - self.margin)
right = min(mask_np.shape[1], right + self.margin)
lower = min(mask_np.shape[0], lower + self.margin)
bounding_box = BoundingBoxField(x_min=left, y_min=upper, x_max=right, y_max=lower)
return BoundingBoxOutput(bounding_box=bounding_box)

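The removed invocation locates every pixel matching the mask color, then takes min/max coordinates padded by the margin and clamped to the image bounds. A compact sketch of that search:

import numpy as np

def mask_bbox(mask_rgba: np.ndarray, color: tuple[int, int, int, int], margin: int = 0):
    y, x = np.where(np.all(mask_rgba == np.array(color), axis=-1))
    if len(x) == 0:
        return (0, 0, 0, 0)  # no pixels match the mask color
    h, w = mask_rgba.shape[:2]
    return (max(0, x.min() - margin), max(0, y.min() - margin),
            min(w, x.max() + margin), min(h, y.max() + margin))

mask = np.zeros((16, 16, 4), dtype=np.uint8)
mask[4:8, 4:8] = (255, 255, 255, 255)
assert mask_bbox(mask, (255, 255, 255, 255)) == (4, 4, 7, 7)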
View File

@@ -18,7 +18,6 @@ from invokeai.app.invocations.fields import (
UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import StringOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
from invokeai.version.invokeai_version import __version__
@@ -276,34 +275,3 @@ class CoreMetadataInvocation(BaseInvocation):
return MetadataOutput(metadata=MetadataField.model_validate(as_dict))
model_config = ConfigDict(extra="allow")
@invocation(
"metadata_field_extractor",
title="Metadata Field Extractor",
tags=["metadata"],
category="metadata",
version="1.0.0",
classification=Classification.Deprecated,
)
class MetadataFieldExtractorInvocation(BaseInvocation):
"""Extracts the text value from an image's metadata given a key.
Raises an error if the image has no metadata or if the value is not a string (nesting not permitted)."""
image: ImageField = InputField(description="The image to extract metadata from")
key: str = InputField(description="The key in the image's metadata to extract the value from")
def invoke(self, context: InvocationContext) -> StringOutput:
image_name = self.image.image_name
metadata = context.images.get_metadata(image_name=image_name)
if not metadata:
raise ValueError(f"No metadata found on image {image_name}")
try:
val = metadata.root[self.key]
if not isinstance(val, str):
raise ValueError(f"Metadata at key '{self.key}' must be a string")
return StringOutput(value=val)
except KeyError as e:
raise ValueError(f"No key '{self.key}' found in the metadata for {image_name}") from e

File diff suppressed because it is too large

View File

@@ -68,7 +68,6 @@ class CLIPField(BaseModel):
class T5EncoderField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class VAEField(BaseModel):
@@ -206,7 +205,7 @@ class LoRALoaderInvocation(BaseInvocation):
lora_key = self.lora.key
if not context.models.exists(lora_key):
raise Exception(f"Unknown lora: {lora_key}!")
raise Exception(f"Unkown lora: {lora_key}!")
if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'LoRA "{lora_key}" already applied to unet')
@@ -257,12 +256,12 @@ class LoRASelectorInvocation(BaseInvocation):
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
class LoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""
loras: Optional[LoRAField | list[LoRAField]] = InputField(
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
@@ -282,14 +281,7 @@ class LoRACollectionLoader(BaseInvocation):
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []
if self.unet is not None:
output.unet = self.unet.model_copy(deep=True)
if self.clip is not None:
output.clip = self.clip.model_copy(deep=True)
for lora in loras:
if lora is None:
continue
if lora.lora.key in added_loras:
continue
@@ -300,10 +292,14 @@ class LoRACollectionLoader(BaseInvocation):
added_loras.append(lora.lora.key)
if self.unet is not None and output.unet is not None:
if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)
if self.clip is not None and output.clip is not None:
if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)
return output
@@ -403,13 +399,13 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
title="SDXL LoRA Collection Loader",
tags=["model"],
category="model",
version="1.1.0",
version="1.0.0",
)
class SDXLLoRACollectionLoader(BaseInvocation):
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
loras: Optional[LoRAField | list[LoRAField]] = InputField(
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
@@ -435,18 +431,7 @@ class SDXLLoRACollectionLoader(BaseInvocation):
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []
if self.unet is not None:
output.unet = self.unet.model_copy(deep=True)
if self.clip is not None:
output.clip = self.clip.model_copy(deep=True)
if self.clip2 is not None:
output.clip2 = self.clip2.model_copy(deep=True)
for lora in loras:
if lora is None:
continue
if lora.lora.key in added_loras:
continue
@@ -457,13 +442,19 @@ class SDXLLoRACollectionLoader(BaseInvocation):
added_loras.append(lora.lora.key)
if self.unet is not None and output.unet is not None:
if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)
if self.clip is not None and output.clip is not None:
if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)
if self.clip2 is not None and output.clip2 is not None:
if self.clip2 is not None:
if output.clip2 is None:
output.clip2 = self.clip2.model_copy(deep=True)
output.clip2.loras.append(lora)
return output
@@ -481,7 +472,7 @@ class VAELoaderInvocation(BaseInvocation):
key = self.vae_model.key
if not context.models.exists(key):
raise Exception(f"Unknown vae: {key}!")
raise Exception(f"Unkown vae: {key}!")
return VAEOutput(vae=VAEField(vae=self.vae_model))

View File

@@ -265,9 +265,13 @@ class ImageInvocation(BaseInvocation):
image: ImageField = InputField(description="The image to load")
def invoke(self, context: InvocationContext) -> ImageOutput:
image_dto = context.images.get_dto(self.image.image_name)
image = context.images.get_pil(self.image.image_name)
return ImageOutput.build(image_dto=image_dto)
return ImageOutput(
image=ImageField(image_name=self.image.image_name),
width=image.width,
height=image.height,
)
@invocation(
@@ -412,7 +416,6 @@ class ColorInvocation(BaseInvocation):
class MaskOutput(BaseInvocationOutput):
"""A torch mask tensor."""
# shape: [1, H, W], dtype: bool
mask: TensorField = OutputField(description="The mask.")
width: int = OutputField(description="The width of the mask in pixels.")
height: int = OutputField(description="The height of the mask in pixels.")

View File

@@ -43,11 +43,16 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision).
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
scaling_constant = 1230 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
# We add a 20% buffer to the working memory estimate to be safe.
working_memory = working_memory * 1.2
return int(working_memory)
@torch.no_grad()

View File

@@ -99,6 +99,6 @@ class Sd3ModelLoaderInvocation(BaseInvocation):
transformer=TransformerField(transformer=transformer, loras=[]),
clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder, loras=[]),
t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
vae=VAEField(vae=vae),
)

View File

@@ -49,7 +49,7 @@ class SAMPointsField(BaseModel):
title="Segment Anything",
tags=["prompt", "segmentation"],
category="segmentation",
version="1.2.0",
version="1.1.0",
)
class SegmentAnythingInvocation(BaseInvocation):
"""Runs a Segment Anything Model."""
@@ -96,10 +96,8 @@ class SegmentAnythingInvocation(BaseInvocation):
# masks contains bool values, so we merge them via max-reduce.
combined_mask, _ = torch.stack(masks).max(dim=0)
# Unsqueeze the channel dimension.
combined_mask = combined_mask.unsqueeze(0)
mask_tensor_name = context.tensors.save(combined_mask)
_, height, width = combined_mask.shape
height, width = combined_mask.shape
return MaskOutput(mask=TensorField(tensor_name=mask_tensor_name), width=width, height=height)
@staticmethod

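Merging boolean masks with a max-reduce keeps a pixel set if any individual mask sets it; the removed line then unsqueezes a channel dimension to match MaskOutput's expected [1, H, W] shape. A small torch illustration:

import torch

masks = [torch.rand(4, 4) > 0.5 for _ in range(3)]  # three [H, W] bool masks
combined, _ = torch.stack(masks).max(dim=0)          # logical OR via max-reduce
combined = combined.unsqueeze(0)                     # -> [1, H, W]
assert combined.shape == (1, 4, 4)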
View File

@@ -9,6 +9,6 @@ def validate_weights(weights: Union[float, list[float]]) -> None:
def validate_begin_end_step(begin_step_percent: float, end_step_percent: float) -> None:
"""Validate that begin_step_percent is less than or equal to end_step_percent"""
if begin_step_percent > end_step_percent:
"""Validate that begin_step_percent is less than end_step_percent"""
if begin_step_percent >= end_step_percent:
raise ValueError("Begin step percent must be less than or equal to end step percent")

View File

@@ -1,82 +1,12 @@
import uvicorn
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
def get_app():
"""Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
"""
from invokeai.app.api_app import app, loop
return app, loop
"""This is a wrapper around the main app entrypoint, to allow for CLI args to be parsed before running the app."""
def run_app() -> None:
"""The main entrypoint for the app."""
# Parse the CLI arguments.
# Before doing _anything_, parse CLI args!
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
InvokeAIArgs.parse_args()
# Load config.
app_config = get_config()
from invokeai.app.api_app import invoke_api
logger = InvokeAILogger.get_logger(config=app_config)
# Configure the torch CUDA memory allocator.
# NOTE: It is important that this happens before torch is imported.
if app_config.pytorch_cuda_alloc_conf:
configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)
# Import from startup_utils here to avoid importing torch before configure_torch_cuda_allocator() is called.
from invokeai.app.util.startup_utils import (
apply_monkeypatches,
check_cudnn,
enable_dev_reload,
find_open_port,
register_mime_types,
)
# Find an open port, and modify the config accordingly.
orig_config_port = app_config.port
app_config.port = find_open_port(app_config.port)
if orig_config_port != app_config.port:
logger.warning(f"Port {orig_config_port} is already in use. Using port {app_config.port}.")
# Miscellaneous startup tasks.
apply_monkeypatches()
register_mime_types()
if app_config.dev_reload:
enable_dev_reload()
check_cudnn(logger)
# Initialize the app and event loop.
app, loop = get_app()
# Load custom nodes. This must be done after importing the Graph class, which itself imports all modules from the
# invocations module. The ordering here is implicit, but important - we want to load custom nodes after all the
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path)
# Start the server.
config = uvicorn.Config(
app=app,
host=app_config.host,
port=app_config.port,
loop="asyncio",
log_level=app_config.log_level_network,
ssl_certfile=app_config.ssl_certfile,
ssl_keyfile=app_config.ssl_keyfile,
)
server = uvicorn.Server(config)
# replace uvicorn's loggers with InvokeAI's for consistent appearance
uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
uvicorn_logger.handlers.clear()
for hdlr in logger.handlers:
uvicorn_logger.addHandler(hdlr)
loop.run_until_complete(server.serve())
invoke_api()

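The removed entrypoint drives uvicorn from an existing asyncio loop rather than calling uvicorn.run(), which is what lets it reuse the loop imported from api_app. A trimmed sketch of that wiring, with a placeholder FastAPI app standing in for the real one:

import asyncio

import uvicorn
from fastapi import FastAPI

app = FastAPI()  # placeholder for the real api_app

config = uvicorn.Config(app=app, host="127.0.0.1", port=9090, loop="asyncio")
server = uvicorn.Server(config)

loop = asyncio.new_event_loop()
loop.run_until_complete(server.serve())  # blocks until the server shuts down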
View File

@@ -1,8 +1,6 @@
from abc import ABC, abstractmethod
from typing import Optional
from invokeai.app.services.image_records.image_records_common import ImageCategory
class BoardImageRecordStorageBase(ABC):
"""Abstract base class for the one-to-many board-image relationship record storage."""
@@ -28,8 +26,6 @@ class BoardImageRecordStorageBase(ABC):
def get_all_board_image_names_for_board(
self,
board_id: str,
categories: list[ImageCategory] | None,
is_intermediate: bool | None,
) -> list[str]:
"""Gets all board images for a board, as a list of the image names."""
pass

View File

@@ -1,20 +1,23 @@
import sqlite3
import threading
from typing import Optional, cast
from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
from invokeai.app.services.image_records.image_records_common import (
ImageCategory,
ImageRecord,
deserialize_image_record,
)
from invokeai.app.services.image_records.image_records_common import ImageRecord, deserialize_image_record
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_lock: threading.RLock
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._cursor = self._conn.cursor()
def add_image_to_board(
self,
@@ -22,8 +25,8 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
image_name: str,
) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
INSERT INTO board_images (board_id, image_name)
VALUES (?, ?)
@@ -35,14 +38,16 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
def remove_image_from_board(
self,
image_name: str,
) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE FROM board_images
WHERE image_name = ?;
@@ -53,6 +58,8 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
def get_images_for_board(
self,
@@ -61,108 +68,96 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
limit: int = 10,
) -> OffsetPaginatedResults[ImageRecord]:
# TODO: this isn't paginated yet?
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT images.*
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE board_images.board_id = ?
ORDER BY board_images.updated_at DESC;
""",
(board_id,),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT images.*
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE board_images.board_id = ?
ORDER BY board_images.updated_at DESC;
""",
(board_id,),
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images WHERE 1=1;
"""
)
count = cast(int, cursor.fetchone()[0])
self._cursor.execute(
"""--sql
SELECT COUNT(*) FROM images WHERE 1=1;
"""
)
count = cast(int, self._cursor.fetchone()[0])
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
def get_all_board_image_names_for_board(
self,
board_id: str,
categories: list[ImageCategory] | None,
is_intermediate: bool | None,
) -> list[str]:
params: list[str | bool] = []
# Base query is a join between images and board_images
stmt = """
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
AND board_images.board_id = ?
"""
params.append(board_id)
# Add the category filter
if categories is not None:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
stmt += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
# Unpack the included categories into the query params
for c in category_strings:
params.append(c)
# Add the is_intermediate filter
if is_intermediate is not None:
stmt += """--sql
AND images.is_intermediate = ?
"""
params.append(is_intermediate)
# Put a ring on it
stmt += ";"
# Execute the query
cursor = self._conn.cursor()
cursor.execute(stmt, params)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [r[0] for r in result]
return image_names
def get_all_board_image_names_for_board(self, board_id: str) -> list[str]:
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT image_name
FROM board_images
WHERE board_id = ?;
""",
(board_id,),
)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result]
return image_names
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
def get_board_for_image(
self,
image_name: str,
) -> Optional[str]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT board_id
FROM board_images
WHERE image_name = ?;
""",
(image_name,),
)
result = cursor.fetchone()
if result is None:
return None
return cast(str, result[0])
(image_name,),
)
result = self._cursor.fetchone()
if result is None:
return None
return cast(str, result[0])
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
def get_image_count_for_board(self, board_id: str) -> int:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT COUNT(*)
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE images.is_intermediate = FALSE
AND board_images.board_id = ?;
""",
(board_id,),
)
count = cast(int, cursor.fetchone()[0])
return count
(board_id,),
)
count = cast(int, self._cursor.fetchone()[0])
return count
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()

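Every method in the reverted storage classes repeats the same discipline: acquire the shared lock, work through the shared cursor, roll back on sqlite3.Error, and release in a finally block. The skeleton of that pattern:

import sqlite3
import threading

class Storage:
    def __init__(self, conn: sqlite3.Connection, lock: threading.RLock) -> None:
        self._conn = conn
        self._lock = lock
        self._cursor = conn.cursor()

    def count(self) -> int:
        try:
            self._lock.acquire()
            self._cursor.execute("SELECT COUNT(*) FROM images;")
            return int(self._cursor.fetchone()[0])
        except sqlite3.Error:
            self._conn.rollback()  # undo any partial work before re-raising
            raise
        finally:
            self._lock.release()

conn = sqlite3.connect(":memory:", check_same_thread=False)
conn.execute("CREATE TABLE images (image_name TEXT);")
assert Storage(conn, threading.RLock()).count() == 0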
View File

@@ -1,8 +1,6 @@
from abc import ABC, abstractmethod
from typing import Optional
from invokeai.app.services.image_records.image_records_common import ImageCategory
class BoardImagesServiceABC(ABC):
"""High-level service for board-image relationship management."""
@@ -28,8 +26,6 @@ class BoardImagesServiceABC(ABC):
def get_all_board_image_names_for_board(
self,
board_id: str,
categories: list[ImageCategory] | None,
is_intermediate: bool | None,
) -> list[str]:
"""Gets all board images for a board, as a list of the image names."""
pass

View File

@@ -1,7 +1,6 @@
from typing import Optional
from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC
from invokeai.app.services.image_records.image_records_common import ImageCategory
from invokeai.app.services.invoker import Invoker
@@ -27,14 +26,8 @@ class BoardImagesService(BoardImagesServiceABC):
def get_all_board_image_names_for_board(
self,
board_id: str,
categories: list[ImageCategory] | None,
is_intermediate: bool | None,
) -> list[str]:
return self.__invoker.services.board_image_records.get_all_board_image_names_for_board(
board_id,
categories,
is_intermediate,
)
return self.__invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
def get_board_for_image(
self,

View File

@@ -1,4 +1,5 @@
import sqlite3
import threading
from typing import Union, cast
from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
@@ -18,14 +19,20 @@ from invokeai.app.util.misc import uuid_string
class SqliteBoardRecordStorage(BoardRecordStorageBase):
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_lock: threading.RLock
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._cursor = self._conn.cursor()
def delete(self, board_id: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE FROM boards
WHERE board_id = ?;
@@ -33,9 +40,14 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
(board_id,),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordDeleteException from e
except Exception as e:
self._conn.rollback()
raise BoardRecordDeleteException from e
finally:
self._lock.release()
def save(
self,
@@ -43,8 +55,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
) -> BoardRecord:
try:
board_id = uuid_string()
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
INSERT OR IGNORE INTO boards (board_id, board_name)
VALUES (?, ?);
@@ -55,6 +67,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordSaveException from e
finally:
self._lock.release()
return self.get(board_id)
def get(
@@ -62,8 +76,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
board_id: str,
) -> BoardRecord:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT *
FROM boards
@@ -72,9 +86,12 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
(board_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
result = cast(Union[sqlite3.Row, None], self._cursor.fetchone())
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordNotFoundException from e
finally:
self._lock.release()
if result is None:
raise BoardRecordNotFoundException
return BoardRecord(**dict(result))
@@ -85,10 +102,11 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
changes: BoardChanges,
) -> BoardRecord:
try:
cursor = self._conn.cursor()
self._lock.acquire()
# Change the name of a board
if changes.board_name is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE boards
SET board_name = ?
@@ -99,7 +117,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
# Change the cover image of a board
if changes.cover_image_name is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE boards
SET cover_image_name = ?
@@ -110,7 +128,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
# Change the archived status of a board
if changes.archived is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE boards
SET archived = ?
@@ -123,6 +141,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordSaveException from e
finally:
self._lock.release()
return self.get(board_id)
def get_many(
@@ -133,10 +153,11 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
limit: int = 10,
include_archived: bool = False,
) -> OffsetPaginatedResults[BoardRecord]:
cursor = self._conn.cursor()
try:
self._lock.acquire()
# Build base query
base_query = """
# Build base query
base_query = """
SELECT *
FROM boards
{archived_filter}
@@ -144,67 +165,81 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
LIMIT ? OFFSET ?;
"""
# Determine archived filter condition
archived_filter = "" if include_archived else "WHERE archived = 0"
# Determine archived filter condition
archived_filter = "" if include_archived else "WHERE archived = 0"
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
# Execute query to fetch boards
cursor.execute(final_query, (limit, offset))
# Execute query to fetch boards
self._cursor.execute(final_query, (limit, offset))
result = cast(list[sqlite3.Row], cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
# Determine count query
if include_archived:
count_query = """
# Determine count query
if include_archived:
count_query = """
SELECT COUNT(*)
FROM boards;
"""
else:
count_query = """
else:
count_query = """
SELECT COUNT(*)
FROM boards
WHERE archived = 0;
"""
# Execute count query
cursor.execute(count_query)
# Execute count query
self._cursor.execute(count_query)
count = cast(int, cursor.fetchone()[0])
count = cast(int, self._cursor.fetchone()[0])
return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)
return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
def get_all(
self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
) -> list[BoardRecord]:
cursor = self._conn.cursor()
if order_by == BoardRecordOrderBy.Name:
base_query = """
try:
self._lock.acquire()
if order_by == BoardRecordOrderBy.Name:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY LOWER(board_name) {direction}
"""
else:
base_query = """
else:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY {order_by} {direction}
"""
archived_filter = "" if include_archived else "WHERE archived = 0"
archived_filter = "" if include_archived else "WHERE archived = 0"
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
cursor.execute(final_query)
self._cursor.execute(final_query)
result = cast(list[sqlite3.Row], cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
result = cast(list[sqlite3.Row], self._cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
return boards
return boards
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()

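get_many composes its SQL in two stages: trusted identifiers (the archived filter, ORDER BY column, and direction, all enum-derived) are interpolated with str.format, while user-supplied limit/offset remain bound ? parameters. A minimal sketch of that split:

def build_boards_query(include_archived: bool, order_by: str, direction: str) -> str:
    # Only trusted, enum-derived identifiers are interpolated into the SQL;
    # limit and offset are always bound as ? parameters at execute time.
    archived_filter = "" if include_archived else "WHERE archived = 0"
    return (
        "SELECT * FROM boards "
        f"{archived_filter} "
        f"ORDER BY {order_by} {direction} "
        "LIMIT ? OFFSET ?;"
    )

query = build_boards_query(False, "created_at", "DESC")
# cursor.execute(query, (limit, offset))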
View File

@@ -63,11 +63,7 @@ class BulkDownloadService(BulkDownloadBase):
return [self._invoker.services.images.get_dto(image_name) for image_name in image_names]
def _board_handler(self, board_id: str) -> list[ImageDTO]:
image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(
board_id,
categories=None,
is_intermediate=None,
)
image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
return self._image_handler(image_names)
def generate_item_id(self, board_id: Optional[str]) -> str:

View File

@@ -91,7 +91,6 @@ class InvokeAIAppConfig(BaseSettings):
ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
@@ -170,9 +169,6 @@ class InvokeAIAppConfig(BaseSettings):
vram: Optional[float] = Field(default=None, ge=0, description="DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.")
lazy_offload: bool = Field(default=True, description="DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.")
# PyTorch Memory Allocator
pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
# DEVICE
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")

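The removed pytorch_cuda_alloc_conf setting only works because it is applied before torch is first imported; the allocator backend is fixed at import time. A minimal sketch of that ordering via the environment variable torch reads:

import os

# Must run before `import torch`: the CUDA allocator is chosen at import time.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:cudaMallocAsync"

import torch  # noqa: E402  (deliberately imported after the allocator config)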
View File

@@ -28,7 +28,6 @@ from invokeai.app.services.events.events_common import (
ModelLoadCompleteEvent,
ModelLoadStartedEvent,
QueueClearedEvent,
QueueItemsRetriedEvent,
QueueItemStatusChangedEvent,
)
@@ -40,7 +39,6 @@ if TYPE_CHECKING:
from invokeai.app.services.session_queue.session_queue_common import (
BatchStatus,
EnqueueBatchResult,
RetryItemsResult,
SessionQueueItem,
SessionQueueStatus,
)
@@ -101,10 +99,6 @@ class EventServiceBase:
"""Emitted when a batch is enqueued"""
self.dispatch(BatchEnqueuedEvent.build(enqueue_result))
def emit_queue_items_retried(self, retry_result: "RetryItemsResult") -> None:
"""Emitted when a list of queue items are retried"""
self.dispatch(QueueItemsRetriedEvent.build(retry_result))
def emit_queue_cleared(self, queue_id: str) -> None:
"""Emitted when a queue is cleared"""
self.dispatch(QueueClearedEvent.build(queue_id))

View File

@@ -10,7 +10,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
BatchStatus,
EnqueueBatchResult,
RetryItemsResult,
SessionQueueItem,
SessionQueueStatus,
)
@@ -291,22 +290,6 @@ class BatchEnqueuedEvent(QueueEventBase):
)
@payload_schema.register
class QueueItemsRetriedEvent(QueueEventBase):
"""Event model for queue_items_retried"""
__event_name__ = "queue_items_retried"
retried_item_ids: list[int] = Field(description="The IDs of the queue items that were retried")
@classmethod
def build(cls, retry_result: RetryItemsResult) -> "QueueItemsRetriedEvent":
return cls(
queue_id=retry_result.queue_id,
retried_item_ids=retry_result.retried_item_ids,
)
@payload_schema.register
class QueueClearedEvent(QueueEventBase):
"""Event model for queue_cleared"""

View File

@@ -1,4 +1,5 @@
import sqlite3
import threading
from datetime import datetime
from typing import Optional, Union, cast
@@ -21,14 +22,21 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteImageRecordStorage(ImageRecordStorageBase):
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_lock: threading.RLock
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._cursor = self._conn.cursor()
def get(self, image_name: str) -> ImageRecord:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
f"""--sql
SELECT {IMAGE_DTO_COLS} FROM images
WHERE image_name = ?;
@@ -36,9 +44,12 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
(image_name,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordNotFoundException from e
finally:
self._lock.release()
if not result:
raise ImageRecordNotFoundException
@@ -47,8 +58,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT metadata FROM images
WHERE image_name = ?;
@@ -56,7 +68,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
(image_name,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
if not result:
raise ImageRecordNotFoundException
@@ -65,7 +77,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordNotFoundException from e
finally:
self._lock.release()
def update(
self,
@@ -73,10 +88,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
changes: ImageRecordChanges,
) -> None:
try:
cursor = self._conn.cursor()
self._lock.acquire()
# Change the category of the image
if changes.image_category is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE images
SET image_category = ?
@@ -87,7 +102,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Change the session associated with the image
if changes.session_id is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE images
SET session_id = ?
@@ -98,7 +113,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Change the image's `is_intermediate` flag
if changes.is_intermediate is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE images
SET is_intermediate = ?
@@ -109,7 +124,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Change the image's `starred` state
if changes.starred is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE images
SET starred = ?
@@ -122,6 +137,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordSaveException from e
finally:
self._lock.release()
def get_many(
self,
@@ -135,104 +152,110 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> OffsetPaginatedResults[ImageRecord]:
cursor = self._conn.cursor()
try:
self._lock.acquire()
# Manually build two queries - one for the count, one for the records
count_query = """--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
if categories is not None:
# Convert the enum values to a unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
# Manually build two queries - one for the count, one for the records
count_query = """--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
# Unpack the included categories into the query params
for c in category_strings:
query_params.append(c)
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
query_params.append(is_intermediate)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
# board_id of "none" is reserved for images without a board
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
# Search term condition
if search_term:
query_conditions += """--sql
AND images.metadata LIKE ?
"""
query_params.append(f"%{search_term.lower()}%")
if categories is not None:
# Convert the enum values to a unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
if starred_first:
query_pagination = f"""--sql
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
else:
query_pagination = f"""--sql
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
# Final images query with pagination
images_query += query_conditions + query_pagination + ";"
# Add all the parameters
images_params = query_params.copy()
# Add the pagination parameters
images_params.extend([limit, offset])
# Unpack the included categories into the query params
for c in category_strings:
query_params.append(c)
# Build the list of images, deserializing each row
cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
count_params = query_params.copy()
cursor.execute(count_query, count_params)
count = cast(int, cursor.fetchone()[0])
query_params.append(is_intermediate)
# board_id of "none" is reserved for images without a board
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
# Search term condition
if search_term:
query_conditions += """--sql
AND images.metadata LIKE ?
"""
query_params.append(f"%{search_term.lower()}%")
if starred_first:
query_pagination = f"""--sql
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
else:
query_pagination = f"""--sql
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
# Final images query with pagination
images_query += query_conditions + query_pagination + ";"
# Add all the parameters
images_params = query_params.copy()
# Add the pagination parameters
images_params.extend([limit, offset])
# Build the list of images, deserializing each row
self._cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], self._cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
count_params = query_params.copy()
self._cursor.execute(count_query, count_params)
count = cast(int, self._cursor.fetchone()[0])
except sqlite3.Error as e:
self._conn.rollback()
raise e
finally:
self._lock.release()
return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
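
`get_many` derives both the COUNT query and the paginated records query from one shared set of conditions, emitting one `?` placeholder per IN-clause value. A reduced sketch of the technique; the table and column names here are placeholders:

def build_queries(categories, limit, offset):
    conditions = ""
    params = []
    if categories:
        # one "?" per category: e.g. two categories -> "?,?"
        placeholders = ",".join("?" * len(categories))
        conditions += f" AND images.image_category IN ({placeholders})"
        params.extend(categories)
    count_query = "SELECT COUNT(*) FROM images WHERE 1=1" + conditions + ";"
    images_query = (
        "SELECT * FROM images WHERE 1=1"
        + conditions
        + " ORDER BY images.created_at DESC LIMIT ? OFFSET ?;"
    )
    return count_query, params, images_query, params + [limit, offset]

print(build_queries(["general", "control"], limit=10, offset=0))
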
def delete(self, image_name: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE FROM images
WHERE image_name = ?;
@@ -243,48 +266,58 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
finally:
self._lock.release()
def delete_many(self, image_names: list[str]) -> None:
try:
cursor = self._conn.cursor()
placeholders = ",".join("?" for _ in image_names)
self._lock.acquire()
# Construct the SQLite query with the placeholders
query = f"DELETE FROM images WHERE image_name IN ({placeholders})"
# Execute the query with the list of IDs as parameters
cursor.execute(query, image_names)
self._cursor.execute(query, image_names)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
finally:
self._lock.release()
def get_intermediates_count(self) -> int:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images
WHERE is_intermediate = TRUE;
"""
)
count = cast(int, cursor.fetchone()[0])
self._conn.commit()
return count
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT COUNT(*) FROM images
WHERE is_intermediate = TRUE;
"""
)
count = cast(int, self._cursor.fetchone()[0])
self._conn.commit()
return count
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
finally:
self._lock.release()
def delete_intermediates(self) -> list[str]:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT image_name FROM images
WHERE is_intermediate = TRUE;
"""
)
result = cast(list[sqlite3.Row], cursor.fetchall())
result = cast(list[sqlite3.Row], self._cursor.fetchall())
image_names = [r[0] for r in result]
cursor.execute(
self._cursor.execute(
"""--sql
DELETE FROM images
WHERE is_intermediate = TRUE;
@@ -295,6 +328,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
finally:
self._lock.release()
def save(
self,
@@ -311,8 +346,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
metadata: Optional[str] = None,
) -> datetime:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
INSERT OR IGNORE INTO images (
image_name,
@@ -345,7 +380,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
)
self._conn.commit()
cursor.execute(
self._cursor.execute(
"""--sql
SELECT created_at
FROM images
@@ -354,30 +389,34 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
(image_name,),
)
created_at = datetime.fromisoformat(cursor.fetchone()[0])
created_at = datetime.fromisoformat(self._cursor.fetchone()[0])
return created_at
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordSaveException from e
finally:
self._lock.release()
def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT images.*
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",
(board_id,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT images.*
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",
(board_id,),
)
result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
finally:
self._lock.release()
if result is None:
return None

View File

@@ -265,11 +265,7 @@ class ImageService(ImageServiceABC):
def delete_images_on_board(self, board_id: str):
try:
image_names = self.__invoker.services.board_image_records.get_all_board_image_names_for_board(
board_id,
categories=None,
is_intermediate=None,
)
image_names = self.__invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
for image_name in image_names:
self.__invoker.services.image_files.delete(image_name)
self.__invoker.services.image_records.delete_many(image_names)
@@ -282,7 +278,7 @@ class ImageService(ImageServiceABC):
self.__invoker.services.logger.error("Failed to delete image files")
raise
except Exception as e:
self.__invoker.services.logger.error(f"Problem deleting image records and files: {str(e)}")
self.__invoker.services.logger.error("Problem deleting image records and files")
raise e
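
The log-message change above drops the interpolated `str(e)`, since the exception is re-raised and surfaces anyway; when the traceback is wanted at the log site, `exc_info=True` is the usual stdlib alternative. A hedged sketch with illustrative names:

import logging

logger = logging.getLogger("images")

def delete_images() -> None:
    try:
        raise RuntimeError("disk full")  # stand-in for the real delete work
    except Exception:
        # attach the full traceback to the log record instead of str(e)
        logger.error("Problem deleting image records and files", exc_info=True)
        raise

try:
    delete_images()
except RuntimeError:
    pass
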
def delete_intermediates(self) -> int:

View File

@@ -1,4 +1,4 @@
# TODO: Should these exceptions subclass existing python exceptions?
# TODO: Should these excpetions subclass existing python exceptions?
class ModelImageFileNotFoundException(Exception):
"""Raised when an image file is not found in storage."""

View File

@@ -78,6 +78,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
"""
super().__init__()
self._db = db
self._cursor = db.conn.cursor()
self._logger = logger
@property
@@ -95,38 +96,38 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Can raise DuplicateModelException and InvalidModelConfigException exceptions.
"""
try:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
INSERT INTO models (
id,
config
)
VALUES (?,?);
""",
(
config.key,
config.model_dump_json(),
),
)
self._db.conn.commit()
with self._db.lock:
try:
self._cursor.execute(
"""--sql
INSERT INTO models (
id,
config
)
VALUES (?,?);
""",
(
config.key,
config.model_dump_json(),
),
)
self._db.conn.commit()
except sqlite3.IntegrityError as e:
self._db.conn.rollback()
if "UNIQUE constraint failed" in str(e):
if "models.path" in str(e):
msg = f"A model with path '{config.path}' is already installed"
elif "models.name" in str(e):
msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
except sqlite3.IntegrityError as e:
self._db.conn.rollback()
if "UNIQUE constraint failed" in str(e):
if "models.path" in str(e):
msg = f"A model with path '{config.path}' is already installed"
elif "models.name" in str(e):
msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
else:
msg = f"A model with key '{config.key}' is already installed"
raise DuplicateModelException(msg) from e
else:
msg = f"A model with key '{config.key}' is already installed"
raise DuplicateModelException(msg) from e
else:
raise e
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
return self.get_model(config.key)
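
Unlike the image-record file, this service takes the lock as a context manager (`with self._db.lock:`) and maps SQLite UNIQUE-constraint failures onto a domain exception. A self-contained sketch under those assumptions; the schema and names are illustrative:

import sqlite3
import threading

class DuplicateModelException(Exception):
    pass

conn = sqlite3.connect(":memory:", check_same_thread=False)
conn.execute("CREATE TABLE models (id TEXT PRIMARY KEY, config TEXT)")
lock = threading.Lock()

def add_model(key: str, config_json: str) -> None:
    with lock:  # released automatically, even on error
        try:
            conn.execute("INSERT INTO models (id, config) VALUES (?, ?)", (key, config_json))
            conn.commit()
        except sqlite3.IntegrityError as e:
            conn.rollback()
            if "UNIQUE constraint failed" in str(e):
                raise DuplicateModelException(f"A model with key '{key}' is already installed") from e
            raise

add_model("abc", "{}")
try:
    add_model("abc", "{}")
except DuplicateModelException as e:
    print(e)
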
@@ -138,21 +139,21 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Can raise an UnknownModelException
"""
try:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
DELETE FROM models
WHERE id=?;
""",
(key,),
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
with self._db.lock:
try:
self._cursor.execute(
"""--sql
DELETE FROM models
WHERE id=?;
""",
(key,),
)
if self._cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig:
record = self.get_model(key)
@@ -163,23 +164,23 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
json_serialized = record.model_dump_json()
try:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
UPDATE models
SET
config=?
WHERE id=?;
""",
(json_serialized, key),
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
with self._db.lock:
try:
self._cursor.execute(
"""--sql
UPDATE models
SET
config=?
WHERE id=?;
""",
(json_serialized, key),
)
if self._cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
return self.get_model(key)
@@ -191,33 +192,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Exceptions: UnknownModelException
"""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE id=?;
""",
(key,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE id=?;
""",
(key,),
)
rows = self._cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
return model
def get_model_by_hash(self, hash: str) -> AnyModelConfig:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
rows = self._cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
return model
def exists(self, key: str) -> bool:
@@ -226,15 +227,16 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
:param key: Unique key for the model to be checked
"""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
select count(*) FROM models
WHERE id=?;
""",
(key,),
)
count = cursor.fetchone()[0]
count = 0
with self._db.lock:
self._cursor.execute(
"""--sql
select count(*) FROM models
WHERE id=?;
""",
(key,),
)
count = self._cursor.fetchone()[0]
return count > 0
def search_by_attr(
@@ -282,18 +284,17 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
where_clause.append("format=?")
bindings.append(model_format)
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
cursor = self._db.conn.cursor()
cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
""",
tuple(bindings),
)
result = cursor.fetchall()
with self._db.lock:
self._cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
""",
tuple(bindings),
)
result = self._cursor.fetchall()
# Parse the model configs.
results: list[AnyModelConfig] = []
@@ -312,28 +313,34 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
"""Return models with the indicated path."""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
results = []
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE path=?;
""",
(str(path),),
)
results = [
ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall()
]
return results
def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
"""Return models with the indicated hash."""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
results = []
with self._db.lock:
self._cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
results = [
ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall()
]
return results
def list_models(
@@ -349,32 +356,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
ModelRecordOrderBy.Format: "format",
}
cursor = self._db.conn.cursor()
# Lock so that the database isn't updated while we're doing the two queries.
# query1: get the total number of model configs
cursor.execute(
"""--sql
select count(*) from models;
""",
(),
)
total = int(cursor.fetchone()[0])
with self._db.lock:
# query1: get the total number of model configs
self._cursor.execute(
"""--sql
select count(*) from models;
""",
(),
)
total = int(self._cursor.fetchone()[0])
# query2: fetch key fields
cursor.execute(
f"""--sql
SELECT config
FROM models
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
LIMIT ?
OFFSET ?;
""",
(
per_page,
page * per_page,
),
)
rows = cursor.fetchall()
items = [ModelSummary.model_validate(dict(x)) for x in rows]
return PaginatedResults(page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items)
# query2: fetch key fields
self._cursor.execute(
f"""--sql
SELECT config
FROM models
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
LIMIT ?
OFFSET ?;
""",
(
per_page,
page * per_page,
),
)
rows = self._cursor.fetchall()
items = [ModelSummary.model_validate(dict(x)) for x in rows]
return PaginatedResults(
page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items
)
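
For reference, the arithmetic behind the two `list_models` queries: the COUNT query yields `total`, `pages` is `ceil(total / per_page)`, and the records query pages with `LIMIT per_page OFFSET page * per_page`. A tiny sketch:

from math import ceil

def page_window(total: int, page: int, per_page: int) -> tuple[int, int, int]:
    pages = ceil(total / per_page)
    limit, offset = per_page, page * per_page
    return pages, limit, offset

print(page_window(total=23, page=2, per_page=10))  # -> (3, 10, 20)
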

View File

@@ -1,11 +1,10 @@
from abc import ABC, abstractmethod
from typing import Any, Coroutine, Optional
from typing import Optional
from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
Batch,
BatchStatus,
CancelAllExceptCurrentResult,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByQueueIDResult,
@@ -14,7 +13,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
IsEmptyResult,
IsFullResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
@@ -33,7 +31,7 @@ class SessionQueueBase(ABC):
pass
@abstractmethod
def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> Coroutine[Any, Any, EnqueueBatchResult]:
def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
"""Enqueues all permutations of a batch for execution."""
pass
@@ -114,11 +112,6 @@ class SessionQueueBase(ABC):
"""Cancels all queue items with matching queue ID"""
pass
@abstractmethod
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
"""Cancels all queue items except in-progress items"""
pass
@abstractmethod
def list_queue_items(
self,
@@ -140,8 +133,3 @@ class SessionQueueBase(ABC):
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
"""Sets the session for a session queue item. Use this to update the session state."""
pass
@abstractmethod
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
"""Retries the given queue items"""
pass
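
The `enqueue_batch` signature change above (a plain `EnqueueBatchResult` instead of a `Coroutine`) pairs with the removal of an `asyncio.to_thread` wrapper later in this diff. The async variant typically looks like the following sketch; the function names are illustrative:

import asyncio

def _enqueue_batch_blocking(queue_id: str) -> str:
    return f"enqueued on {queue_id}"  # stand-in for the blocking SQLite work

async def enqueue_batch(queue_id: str) -> str:
    # run the blocking body in a worker thread so the event loop stays responsive
    return await asyncio.to_thread(_enqueue_batch_blocking, queue_id)

print(asyncio.run(enqueue_batch("default")))
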

View File

@@ -1,7 +1,7 @@
import datetime
import json
from itertools import chain, product
from typing import Generator, Literal, Optional, TypeAlias, Union, cast
from typing import Generator, Iterable, Literal, NamedTuple, Optional, TypeAlias, Union, cast
from pydantic import (
AliasChoices,
@@ -234,9 +234,6 @@ class SessionQueueItemWithoutGraph(BaseModel):
field_values: Optional[list[NodeFieldValue]] = Field(
default=None, description="The field values that were used for this queue item"
)
retried_from_item_id: Optional[int] = Field(
default=None, description="The item_id of the queue item that this item was retried from"
)
@classmethod
def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
@@ -347,11 +344,6 @@ class EnqueueBatchResult(BaseModel):
priority: int = Field(description="The priority of the enqueued batch")
class RetryItemsResult(BaseModel):
queue_id: str = Field(description="The ID of the queue")
retried_item_ids: list[int] = Field(description="The IDs of the queue items that were retried")
class ClearResult(BaseModel):
"""Result of clearing the session queue"""
@@ -382,12 +374,6 @@ class CancelByQueueIDResult(CancelByBatchIDsResult):
pass
class CancelAllExceptCurrentResult(CancelByBatchIDsResult):
"""Result of canceling all except current"""
pass
class IsEmptyResult(BaseModel):
"""Result of checking if the session queue is empty"""
@@ -406,143 +392,61 @@ class IsFullResult(BaseModel):
# region Util
def create_session_nfv_tuples(batch: Batch, maximum: int) -> Generator[tuple[str, str, str], None, None]:
def populate_graph(graph: Graph, node_field_values: Iterable[NodeFieldValue]) -> Graph:
"""
Given a batch and a maximum number of sessions to create, generate a tuple of session_id, session_json, and
field_values_json for each session.
Populates the given graph with the given batch data items.
"""
graph_clone = graph.model_copy(deep=True)
for item in node_field_values:
node = graph_clone.get_node(item.node_path)
if node is None:
continue
setattr(node, item.field_name, item.value)
graph_clone.update_node(item.node_path, node)
return graph_clone
The batch has a "source" graph and a data property. The data property is a list of lists of BatchDatum objects.
Each BatchDatum has a field identifier (e.g. a node id and field name), and a list of values to substitute into
the field.
This structure allows us to create a new graph for every possible permutation of BatchDatum objects:
- Each BatchDatum can be "expanded" into a dict of node-field-value tuples - one for each item in the BatchDatum.
- Zip each inner list of expanded BatchDatum objects together. Call this a "batch_data_list".
- Take the cartesian product of all zipped batch_data_lists, resulting in a list of permutations of BatchDatum
- Take the cartesian product of all zipped batch_data_lists, resulting in a list of lists of BatchDatum objects.
Each inner list now represents the substitution values for a single permutation (session).
- For each permutation, substitute the values into the graph
This function is optimized for performance, as it is used to generate a large number of sessions at once.
Args:
batch: The batch to generate sessions from
maximum: The maximum number of sessions to generate
Returns:
A generator that yields tuples of session_id, session_json, and field_values_json for each session. The
generator will stop early if the maximum number of sessions is reached.
def create_session_nfv_tuples(
batch: Batch, maximum: int
) -> Generator[tuple[GraphExecutionState, list[NodeFieldValue], Optional[WorkflowWithoutID]], None, None]:
"""
Create all graph permutations from the given batch data and graph. Yields tuples
of the form (graph, batch_data_items) where batch_data_items is the list of BatchDataItems
that was applied to the graph.
"""
# TODO: Should this be a class method on Batch?
data: list[list[tuple[dict]]] = []
data: list[list[tuple[NodeFieldValue]]] = []
batch_data_collection = batch.data if batch.data is not None else []
for batch_datum_list in batch_data_collection:
node_field_values_to_zip: list[list[dict]] = []
# Expand each BatchDatum into a list of dicts - one for each item in the BatchDatum
# each batch_datum_list needs to be converted to NodeFieldValues and then zipped
node_field_values_to_zip: list[list[NodeFieldValue]] = []
for batch_datum in batch_datum_list:
node_field_values = [
# Note: A tuple here is slightly faster than a dict, but we need the object in dict form to be inserted
# in the session_queue table anyways. So, overall creating NFVs as dicts is faster.
{"node_path": batch_datum.node_path, "field_name": batch_datum.field_name, "value": item}
NodeFieldValue(node_path=batch_datum.node_path, field_name=batch_datum.field_name, value=item)
for item in batch_datum.items
]
node_field_values_to_zip.append(node_field_values)
# Zip the dicts together to create a list of dicts for each permutation
data.append(list(zip(*node_field_values_to_zip, strict=True))) # type: ignore [arg-type]
# We serialize the graph and session once, then mutate the graph dict in place for each session.
#
# This sounds scary, but it's actually fine.
#
# The batch prep logic injects field values into the same fields for each generated session.
#
# For example, after the product operation, we'll end up with a list of node-field-value tuples like this:
# [
# (
# {"node_path": "1", "field_name": "a", "value": 1},
# {"node_path": "2", "field_name": "b", "value": 2},
# {"node_path": "3", "field_name": "c", "value": 3},
# ),
# (
# {"node_path": "1", "field_name": "a", "value": 4},
# {"node_path": "2", "field_name": "b", "value": 5},
# {"node_path": "3", "field_name": "c", "value": 6},
# )
# ]
#
# Note that each tuple has the same length, and each tuple substitutes values in for exactly the same node fields.
# No matter the complexity of the batch, this property holds true.
#
# This means each permutation's substitution can be done in-place on the same graph dict, because it overwrites the
# previous mutation. We only need to serialize the graph once, and then we can mutate it in place for each session.
#
# Previously, we had created new Graph objects for each session, but this was very slow for large (1k+ session
# batches). We then tried dumping the graph to dict and using deep-copy to create a new dict for each session,
# but this was also slow.
#
# Overall, we achieved a 100x speedup by mutating the graph dict in place for each session over creating new Graph
# objects for each session.
#
# We will also mutate the session dict in place, setting a new ID for each session and setting the mutated graph
# dict as the session's graph.
# Dump the batch's graph to a dict once
graph_as_dict = batch.graph.model_dump(warnings=False, exclude_none=True)
# We must provide a Graph object when creating the "dummy" session dict, but we don't actually use it. It will be
# overwritten for each session by the mutated graph_as_dict.
session_dict = GraphExecutionState(graph=Graph()).model_dump(warnings=False, exclude_none=True)
# Now we can create a generator that yields the session_id, session_json, and field_values_json for each session.
# create generator to yield session,nfv tuples
count = 0
# Each batch may have multiple runs, so we need to generate the same number of sessions for each run. The total is
# still limited by the maximum number of sessions.
for _ in range(batch.runs):
for d in product(*data):
if count >= maximum:
# We've reached the maximum number of sessions we may generate
return
# Flatten the list of lists of dicts into a single list of dicts
# TODO(psyche): Is there a more efficient way to do this?
flat_node_field_values = list(chain.from_iterable(d))
# Need a fresh ID for each session
session_id = uuid_string()
# Mutate the session dict in place
session_dict["id"] = session_id
# Substitute the values into the graph
for nfv in flat_node_field_values:
graph_as_dict["nodes"][nfv["node_path"]][nfv["field_name"]] = nfv["value"]
# Mutate the session dict in place
session_dict["graph"] = graph_as_dict
# Serialize the session and field values
# Note the use of pydantic's to_jsonable_python to handle serialization of any python object, including sets.
session_json = json.dumps(session_dict, default=to_jsonable_python)
field_values_json = json.dumps(flat_node_field_values, default=to_jsonable_python)
# Yield the session_id, session_json, and field_values_json
yield (session_id, session_json, field_values_json)
# Increment the count so we know when to stop
graph = populate_graph(batch.graph, flat_node_field_values)
yield (GraphExecutionState(graph=graph), flat_node_field_values, batch.workflow)
count += 1
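
Both versions of the docstring describe the same expansion: zip each `BatchDatum` list so paired values advance together, then take the cartesian product across the zipped axes and flatten each permutation. A worked example with plain dicts standing in for `NodeFieldValue`:

from itertools import chain, product

# axis 1: seed and cfg vary together (zipped); axis 2: prompt varies freely
zipped_a = list(zip(
    [{"node_path": "1", "field_name": "seed", "value": v} for v in (1, 2)],
    [{"node_path": "2", "field_name": "cfg", "value": v} for v in (7.0, 7.5)],
    strict=True,
))
zipped_b = [({"node_path": "3", "field_name": "prompt", "value": v},) for v in ("cat", "dog")]

for permutation in product(zipped_a, zipped_b):  # 2 x 2 = 4 sessions
    print(list(chain.from_iterable(permutation)))
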
def calc_session_count(batch: Batch) -> int:
"""
Calculates the number of sessions that would be created by the batch, without incurring the overhead of actually
creating them, as is done in `create_session_nfv_tuples()`.
The count is used to communicate to the user how many sessions were _requested_ to be created, as opposed to how
many were _actually_ created (which may be less due to the maximum number of sessions).
Calculates the number of sessions that would be created by the batch, without incurring
the overhead of actually generating them. Adapted from `create_sessions()`.
"""
# TODO: Should this be a class method on Batch?
if not batch.data:
@@ -558,75 +462,41 @@ def calc_session_count(batch: Batch) -> int:
return len(data_product) * batch.runs
ValueToInsertTuple: TypeAlias = tuple[
str, # queue_id
str, # session (as stringified JSON)
str, # session_id
str, # batch_id
str | None, # field_values (optional, as stringified JSON)
int, # priority
str | None, # workflow (optional, as stringified JSON)
str | None, # origin (optional)
str | None, # destination (optional)
int | None, # retried_from_item_id (optional, this is always None for new items)
]
"""A type alias for the tuple of values to insert into the session queue table."""
class SessionQueueValueToInsert(NamedTuple):
"""A tuple of values to insert into the session_queue table"""
# Careful with the ordering of this - it must match the insert statement
queue_id: str # queue_id
session: str # session json
session_id: str # session_id
batch_id: str # batch_id
field_values: Optional[str] # field_values json
priority: int # priority
workflow: Optional[str] # workflow json
origin: str | None
destination: str | None
def prepare_values_to_insert(
queue_id: str, batch: Batch, priority: int, max_new_queue_items: int
) -> list[ValueToInsertTuple]:
"""
Given a batch, prepare the values to insert into the session queue table. The list of tuples can be used with an
`executemany` statement to insert multiple rows at once.
ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
Args:
queue_id: The ID of the queue to insert the items into
batch: The batch to prepare the values for
priority: The priority of the queue items
max_new_queue_items: The maximum number of queue items to insert
Returns:
A list of tuples to insert into the session queue table. Each tuple contains the following values:
- queue_id
- session (as stringified JSON)
- session_id
- batch_id
- field_values (optional, as stringified JSON)
- priority
- workflow (optional, as stringified JSON)
- origin (optional)
- destination (optional)
- retried_from_item_id (optional, this is always None for new items)
"""
# A tuple is a fast and memory-efficient way to store the values to insert. Previously, we used a NamedTuple, but
# measured a ~5% performance improvement by using a normal tuple instead. For very large batches (10k+ items), the
# this difference becomes noticeable.
#
# So, despite the inferior DX with normal tuples, we use one here for performance reasons.
values_to_insert: list[ValueToInsertTuple] = []
# pydantic's to_jsonable_python handles serialization of any python object, including sets, which json.dumps does
# not support by default. Apparently there are sets somewhere in the graph.
# The same workflow is used for all sessions in the batch - serialize it once
workflow_json = json.dumps(batch.workflow, default=to_jsonable_python) if batch.workflow else None
for session_id, session_json, field_values_json in create_session_nfv_tuples(batch, max_new_queue_items):
def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new_queue_items: int) -> ValuesToInsert:
values_to_insert: ValuesToInsert = []
for session, field_values, workflow in create_session_nfv_tuples(batch, max_new_queue_items):
# sessions must have unique id
session.id = uuid_string()
values_to_insert.append(
(
queue_id,
session_json,
session_id,
batch.batch_id,
field_values_json,
priority,
workflow_json,
batch.origin,
batch.destination,
None,
SessionQueueValueToInsert(
queue_id, # queue_id
session.model_dump_json(warnings=False, exclude_none=True), # session (json)
session.id, # session_id
batch.batch_id, # batch_id
# must use pydantic_encoder bc field_values is a list of models
json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json)
priority, # priority
json.dumps(workflow, default=to_jsonable_python) if workflow else None, # workflow (json)
batch.origin, # origin
batch.destination, # destination
)
)
return values_to_insert
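
The removed comment above cites roughly a 5% gain from plain tuples over `NamedTuple`s on very large batches. That claim is easy to probe with a sketch like the one below; the numbers are machine-dependent and are not the project's measurements:

from timeit import timeit
from typing import NamedTuple

class Row(NamedTuple):
    queue_id: str
    priority: int

t_named = timeit(lambda: Row("default", 0), number=1_000_000)
t_plain = timeit(lambda: ("default", 0), number=1_000_000)
print(f"NamedTuple: {t_named:.3f}s  plain tuple: {t_plain:.3f}s")
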

View File

@@ -1,10 +1,7 @@
import asyncio
import json
import sqlite3
import threading
from typing import Optional, Union, cast
from pydantic_core import to_jsonable_python
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
from invokeai.app.services.session_queue.session_queue_common import (
@@ -12,7 +9,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
Batch,
BatchStatus,
CancelAllExceptCurrentResult,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByQueueIDResult,
@@ -21,7 +17,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
IsEmptyResult,
IsFullResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
@@ -37,6 +32,9 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteSessionQueue(SessionQueueBase):
__invoker: Invoker
__conn: sqlite3.Connection
__cursor: sqlite3.Cursor
__lock: threading.RLock
def start(self, invoker: Invoker) -> None:
self.__invoker = invoker
@@ -52,7 +50,9 @@ class SqliteSessionQueue(SessionQueueBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self.__lock = db.lock
self.__conn = db.conn
self.__cursor = self.__conn.cursor()
def _set_in_progress_to_canceled(self) -> None:
"""
@@ -60,8 +60,8 @@ class SqliteSessionQueue(SessionQueueBase):
This is necessary because the invoker may have been killed while processing a queue item.
"""
try:
cursor = self._conn.cursor()
cursor.execute(
self.__lock.acquire()
self.__cursor.execute(
"""--sql
UPDATE session_queue
SET status = 'canceled'
@@ -69,13 +69,14 @@ class SqliteSessionQueue(SessionQueueBase):
"""
)
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
def _get_current_queue_size(self, queue_id: str) -> int:
"""Gets the current number of pending queue items"""
cursor = self._conn.cursor()
cursor.execute(
self.__cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
@@ -85,12 +86,11 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
return cast(int, cursor.fetchone()[0])
return cast(int, self.__cursor.fetchone()[0])
def _get_highest_priority(self, queue_id: str) -> int:
"""Gets the highest priority value in the queue"""
cursor = self._conn.cursor()
cursor.execute(
self.__cursor.execute(
"""--sql
SELECT MAX(priority)
FROM session_queue
@@ -100,14 +100,12 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
return cast(Union[int, None], cursor.fetchone()[0]) or 0
return cast(Union[int, None], self.__cursor.fetchone()[0]) or 0
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
try:
cursor = self._conn.cursor()
self.__lock.acquire()
# TODO: how does this work in a multi-user scenario?
current_queue_size = self._get_current_queue_size(queue_id)
max_queue_size = self.__invoker.services.configuration.max_queue_size
@@ -129,17 +127,19 @@ class SqliteSessionQueue(SessionQueueBase):
if requested_count > enqueued_count:
values_to_insert = values_to_insert[:max_new_queue_items]
cursor.executemany(
self.__cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
self._conn.commit()
self.__conn.commit()
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
enqueue_result = EnqueueBatchResult(
queue_id=queue_id,
requested=requested_count,
@@ -151,19 +151,25 @@ class SqliteSessionQueue(SessionQueueBase):
return enqueue_result
def dequeue(self) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE status = 'pending'
ORDER BY
priority DESC,
item_id ASC
LIMIT 1
"""
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE status = 'pending'
ORDER BY
priority DESC,
item_id ASC
LIMIT 1
"""
)
result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
if result is None:
return None
queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
@@ -171,40 +177,52 @@ class SqliteSessionQueue(SessionQueueBase):
return queue_item
def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
ORDER BY
priority DESC,
created_at ASC
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
ORDER BY
priority DESC,
created_at ASC
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
if result is None:
return None
return SessionQueueItem.queue_item_from_dict(dict(result))
def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'in_progress'
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'in_progress'
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
if result is None:
return None
return SessionQueueItem.queue_item_from_dict(dict(result))
@@ -218,8 +236,8 @@ class SqliteSessionQueue(SessionQueueBase):
error_traceback: Optional[str] = None,
) -> SessionQueueItem:
try:
cursor = self._conn.cursor()
cursor.execute(
self.__lock.acquire()
self.__cursor.execute(
"""--sql
UPDATE session_queue
SET status = ?, error_type = ?, error_message = ?, error_traceback = ?
@@ -227,10 +245,12 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(status, error_type, error_message, error_traceback, item_id),
)
self._conn.commit()
self.__conn.commit()
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
queue_item = self.get_queue_item(item_id)
batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
@@ -238,36 +258,48 @@ class SqliteSessionQueue(SessionQueueBase):
return queue_item
def is_empty(self, queue_id: str) -> IsEmptyResult:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
is_empty = cast(int, cursor.fetchone()[0]) == 0
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
is_empty = cast(int, self.__cursor.fetchone()[0]) == 0
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return IsEmptyResult(is_empty=is_empty)
def is_full(self, queue_id: str) -> IsFullResult:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
max_queue_size = self.__invoker.services.configuration.max_queue_size
is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
max_queue_size = self.__invoker.services.configuration.max_queue_size
is_full = cast(int, self.__cursor.fetchone()[0]) >= max_queue_size
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return IsFullResult(is_full=is_full)
def clear(self, queue_id: str) -> ClearResult:
try:
cursor = self._conn.cursor()
cursor.execute(
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT COUNT(*)
FROM session_queue
@@ -275,8 +307,8 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
count = cursor.fetchone()[0]
cursor.execute(
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
"""--sql
DELETE
FROM session_queue
@@ -284,16 +316,17 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
self.__conn.commit()
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
self.__invoker.services.events.emit_queue_cleared(queue_id)
return ClearResult(deleted=count)
def prune(self, queue_id: str) -> PruneResult:
try:
cursor = self._conn.cursor()
where = """--sql
WHERE
queue_id = ?
@@ -303,7 +336,8 @@ class SqliteSessionQueue(SessionQueueBase):
OR status = 'canceled'
)
"""
cursor.execute(
self.__lock.acquire()
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
@@ -311,8 +345,8 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
count = cursor.fetchone()[0]
cursor.execute(
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
f"""--sql
DELETE
FROM session_queue
@@ -320,10 +354,12 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
self.__conn.commit()
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
return PruneResult(deleted=count)
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
@@ -352,8 +388,8 @@ class SqliteSessionQueue(SessionQueueBase):
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
try:
cursor = self._conn.cursor()
current_queue_item = self.get_current(queue_id)
self.__lock.acquire()
placeholders = ", ".join(["?" for _ in batch_ids])
where = f"""--sql
WHERE
@@ -364,7 +400,7 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'failed'
"""
params = [queue_id] + batch_ids
cursor.execute(
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
@@ -372,8 +408,8 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
count = cursor.fetchone()[0]
cursor.execute(
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
f"""--sql
UPDATE session_queue
SET status = 'canceled'
@@ -381,18 +417,20 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
self._conn.commit()
self.__conn.commit()
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CancelByBatchIDsResult(canceled=count)
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
try:
cursor = self._conn.cursor()
current_queue_item = self.get_current(queue_id)
self.__lock.acquire()
where = """--sql
WHERE
queue_id == ?
@@ -402,7 +440,7 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'failed'
"""
params = (queue_id, destination)
cursor.execute(
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
@@ -410,8 +448,8 @@ class SqliteSessionQueue(SessionQueueBase):
""",
params,
)
count = cursor.fetchone()[0]
cursor.execute(
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
f"""--sql
UPDATE session_queue
SET status = 'canceled'
@@ -419,18 +457,20 @@ class SqliteSessionQueue(SessionQueueBase):
""",
params,
)
self._conn.commit()
self.__conn.commit()
if current_queue_item is not None and current_queue_item.destination == destination:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CancelByDestinationResult(canceled=count)
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
try:
cursor = self._conn.cursor()
current_queue_item = self.get_current(queue_id)
self.__lock.acquire()
where = """--sql
WHERE
queue_id is ?
@@ -439,7 +479,7 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'failed'
"""
params = [queue_id]
cursor.execute(
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
@@ -447,8 +487,8 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
count = cursor.fetchone()[0]
cursor.execute(
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
f"""--sql
UPDATE session_queue
SET status = 'canceled'
@@ -456,7 +496,7 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
self._conn.commit()
self.__conn.commit()
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
queue_status = self.get_queue_status(queue_id=queue_id)
@@ -464,64 +504,41 @@ class SqliteSessionQueue(SessionQueueBase):
current_queue_item, batch_status, queue_status
)
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CancelByQueueIDResult(canceled=count)
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
try:
cursor = self._conn.cursor()
where = """--sql
WHERE
queue_id == ?
AND status == 'pending'
"""
cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
{where};
""",
(queue_id,),
)
count = cursor.fetchone()[0]
cursor.execute(
f"""--sql
UPDATE session_queue
SET status = 'canceled'
{where};
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return CancelAllExceptCurrentResult(canceled=count)
def get_queue_item(self, item_id: int) -> SessionQueueItem:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT * FROM session_queue
WHERE
item_id = ?
""",
(item_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT * FROM session_queue
WHERE
item_id = ?
""",
(item_id,),
)
result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
if result is None:
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
return SessionQueueItem.queue_item_from_dict(dict(result))
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
try:
cursor = self._conn.cursor()
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
# during execution.
session_json = session.model_dump_json(warnings=False, exclude_none=True)
cursor.execute(
self.__lock.acquire()
self.__cursor.execute(
"""--sql
UPDATE session_queue
SET session = ?
@@ -529,10 +546,12 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(session_json, item_id),
)
self._conn.commit()
self.__conn.commit()
except Exception:
self._conn.rollback()
self.__conn.rollback()
raise
finally:
self.__lock.release()
return self.get_queue_item(item_id)
def list_queue_items(
@@ -543,71 +562,83 @@ class SqliteSessionQueue(SessionQueueBase):
cursor: Optional[int] = None,
status: Optional[QUEUE_ITEM_STATUS] = None,
) -> CursorPaginatedResults[SessionQueueItemDTO]:
cursor_ = self._conn.cursor()
item_id = cursor
query = """--sql
SELECT item_id,
status,
priority,
field_values,
error_type,
error_message,
error_traceback,
created_at,
updated_at,
completed_at,
started_at,
session_id,
batch_id,
queue_id,
origin,
destination
FROM session_queue
WHERE queue_id = ?
"""
params: list[Union[str, int]] = [queue_id]
if status is not None:
query += """--sql
AND status = ?
"""
params.append(status)
if item_id is not None:
query += """--sql
AND (priority < ?) OR (priority = ? AND item_id > ?)
"""
params.extend([priority, priority, item_id])
query += """--sql
ORDER BY
priority DESC,
item_id ASC
LIMIT ?
try:
item_id = cursor
self.__lock.acquire()
query = """--sql
SELECT item_id,
status,
priority,
field_values,
error_type,
error_message,
error_traceback,
created_at,
updated_at,
completed_at,
started_at,
session_id,
batch_id,
queue_id,
origin,
destination
FROM session_queue
WHERE queue_id = ?
"""
params.append(limit + 1)
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
has_more = False
if len(items) > limit:
# remove the extra item
items.pop()
has_more = True
params: list[Union[str, int]] = [queue_id]
if status is not None:
query += """--sql
AND status = ?
"""
params.append(status)
if item_id is not None:
query += """--sql
AND (priority < ?) OR (priority = ? AND item_id > ?)
"""
params.extend([priority, priority, item_id])
query += """--sql
ORDER BY
priority DESC,
item_id ASC
LIMIT ?
"""
params.append(limit + 1)
self.__cursor.execute(query, params)
results = cast(list[sqlite3.Row], self.__cursor.fetchall())
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
has_more = False
if len(items) > limit:
# remove the extra item
items.pop()
has_more = True
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CursorPaginatedResults(items=items, limit=limit, has_more=has_more)
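
`list_queue_items` over-fetches one row (`limit + 1`) and uses the extra row purely as a has-more signal for cursor pagination. The mechanism, reduced to an in-memory sketch:

rows = [{"item_id": i} for i in range(25)]  # stand-in for session_queue rows

def list_page(limit: int, after: int = -1):
    matched = [r for r in rows if r["item_id"] > after]
    items = matched[: limit + 1]  # fetch one extra row
    has_more = len(items) > limit
    if has_more:
        items.pop()  # drop the sentinel row before returning
    return items, has_more

items, has_more = list_page(limit=10)
print(len(items), has_more)  # -> 10 True
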
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
GROUP BY status
""",
(queue_id,),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
GROUP BY status
""",
(queue_id,),
)
counts_result = cast(list[sqlite3.Row], self.__cursor.fetchall())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
current_item = self.get_current(queue_id=queue_id)
total = sum(row[1] for row in counts_result)
@@ -626,23 +657,29 @@ class SqliteSessionQueue(SessionQueueBase):
)
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
FROM session_queue
WHERE
queue_id = ?
AND batch_id = ?
GROUP BY status
""",
(queue_id, batch_id),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
origin = result[0]["origin"] if result else None
destination = result[0]["destination"] if result else None
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
FROM session_queue
WHERE
queue_id = ?
AND batch_id = ?
GROUP BY status
""",
(queue_id, batch_id),
)
result = cast(list[sqlite3.Row], self.__cursor.fetchall())
total = sum(row[1] for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
origin = result[0]["origin"] if result else None
destination = result[0]["destination"] if result else None
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return BatchStatus(
batch_id=batch_id,
@@ -658,18 +695,24 @@ class SqliteSessionQueue(SessionQueueBase):
)
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
AND destination = ?
GROUP BY status
""",
(queue_id, destination),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
try:
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
AND destination = ?
GROUP BY status
""",
(queue_id, destination),
)
counts_result = cast(list[sqlite3.Row], self.__cursor.fetchall())
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
total = sum(row[1] for row in counts_result)
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
@@ -684,68 +727,3 @@ class SqliteSessionQueue(SessionQueueBase):
canceled=counts.get("canceled", 0),
total=total,
)
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
"""Retries the given queue items"""
try:
cursor = self._conn.cursor()
values_to_insert: list[tuple] = []
retried_item_ids: list[int] = []
for item_id in item_ids:
queue_item = self.get_queue_item(item_id)
if queue_item.status not in ("failed", "canceled"):
continue
retried_item_ids.append(item_id)
field_values_json = (
json.dumps(queue_item.field_values, default=to_jsonable_python) if queue_item.field_values else None
)
workflow_json = (
json.dumps(queue_item.workflow, default=to_jsonable_python) if queue_item.workflow else None
)
cloned_session = GraphExecutionState(graph=queue_item.session.graph)
cloned_session_json = cloned_session.model_dump_json(warnings=False, exclude_none=True)
retried_from_item_id = (
queue_item.retried_from_item_id
if queue_item.retried_from_item_id is not None
else queue_item.item_id
)
value_to_insert = (
queue_item.queue_id,
queue_item.batch_id,
queue_item.destination,
field_values_json,
queue_item.origin,
queue_item.priority,
workflow_json,
cloned_session_json,
cloned_session.id,
retried_from_item_id,
)
values_to_insert.append(value_to_insert)
# TODO(psyche): Handle max queue size?
cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, batch_id, destination, field_values, origin, priority, workflow, session, session_id, retried_from_item_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
retry_result = RetryItemsResult(
queue_id=queue_id,
retried_item_ids=retried_item_ids,
)
self.__invoker.services.events.emit_queue_items_retried(retry_result)
return retry_result
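Aside: the storage classes in this commit all return to the same shared-lock pattern: acquire the database-wide lock, execute on a shared cursor, roll back on any exception, and release the lock in a finally block. A minimal sketch of that pattern (hypothetical LockedQueueStats class and query, for illustration only):

import sqlite3
import threading

class LockedQueueStats:
    def __init__(self, conn: sqlite3.Connection, lock: threading.RLock) -> None:
        self._conn = conn
        self._lock = lock
        self._cursor = conn.cursor()

    def count_by_status(self, queue_id: str) -> dict[str, int]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                "SELECT status, count(*) FROM session_queue WHERE queue_id = ? GROUP BY status",
                (queue_id,),
            )
            # Sharing one cursor is safe here because all access is serialized by the lock.
            return {row[0]: row[1] for row in self._cursor.fetchall()}
        except Exception:
            self._conn.rollback()  # discard any open transaction before re-raising
            raise
        finally:
            self._lock.release()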

View File

@@ -51,18 +51,15 @@ class Edge(BaseModel):
source: EdgeConnection = Field(description="The connection for the edge's from node and field")
destination: EdgeConnection = Field(description="The connection for the edge's to node and field")
def __str__(self):
return f"{self.source.node_id}.{self.source.field} -> {self.destination.node_id}.{self.destination.field}"
def get_output_field_type(node: BaseInvocation, field: str) -> Any:
def get_output_field(node: BaseInvocation, field: str) -> Any:
node_type = type(node)
node_outputs = get_type_hints(node_type.get_output_annotation())
node_output_field = node_outputs.get(field) or None
return node_output_field
def get_input_field_type(node: BaseInvocation, field: str) -> Any:
def get_input_field(node: BaseInvocation, field: str) -> Any:
node_type = type(node)
node_inputs = get_type_hints(node_type)
node_input_field = node_inputs.get(field) or None
@@ -96,10 +93,6 @@ def is_list_or_contains_list(t):
return False
def is_any(t: Any) -> bool:
return t == Any or Any in get_args(t)
def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
if not from_type:
return False
@@ -109,7 +102,13 @@ def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
# TODO: this is pretty forgiving on generic types. Clean that up (need to handle optionals and such)
if from_type and to_type:
# Ports are compatible
if from_type == to_type or is_any(from_type) or is_any(to_type):
if (
from_type == to_type
or from_type == Any
or to_type == Any
or Any in get_args(from_type)
or Any in get_args(to_type)
):
return True
if from_type in get_args(to_type):
@@ -141,10 +140,10 @@ def are_connections_compatible(
"""Determines if a connection between fields of two nodes is compatible."""
# TODO: handle iterators and collectors
from_type = get_output_field_type(from_node, from_field)
to_type = get_input_field_type(to_node, to_field)
from_node_field = get_output_field(from_node, from_field)
to_node_field = get_input_field(to_node, to_field)
return are_connection_types_compatible(from_type, to_type)
return are_connection_types_compatible(from_node_field, to_node_field)
T = TypeVar("T")
@@ -441,19 +440,17 @@ class Graph(BaseModel):
self.get_node(edge.destination.node_id),
edge.destination.field,
):
raise InvalidEdgeError(f"Edge source and target types do not match ({edge})")
raise InvalidEdgeError(
f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate all iterators & collectors
# TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available
for node in self.nodes.values():
if isinstance(node, IterateInvocation):
err = self._is_iterator_connection_valid(node.id)
if err is not None:
raise InvalidEdgeError(f"Invalid iterator node ({node.id}): {err}")
if isinstance(node, CollectInvocation):
err = self._is_collector_connection_valid(node.id)
if err is not None:
raise InvalidEdgeError(f"Invalid collector node ({node.id}): {err}")
if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id):
raise InvalidEdgeError(f"Invalid iterator node {node.id}")
if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id):
raise InvalidEdgeError(f"Invalid collector node {node.id}")
return None
@@ -480,11 +477,11 @@ class Graph(BaseModel):
def _is_destination_field_Any(self, edge: Edge) -> bool:
"""Checks if the destination field for an edge is of type typing.Any"""
return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == Any
return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == Any
def _is_destination_field_list_of_Any(self, edge: Edge) -> bool:
"""Checks if the destination field for an edge is of type typing.Any"""
return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]
return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]
def _validate_edge(self, edge: Edge):
"""Validates that a new edge doesn't create a cycle in the graph"""
@@ -494,40 +491,55 @@ class Graph(BaseModel):
from_node = self.get_node(edge.source.node_id)
to_node = self.get_node(edge.destination.node_id)
except NodeNotFoundError:
raise InvalidEdgeError(f"One or both nodes don't exist ({edge})")
raise InvalidEdgeError("One or both nodes don't exist: {edge.source.node_id} -> {edge.destination.node_id}")
# Validate that an edge to this node+field doesn't already exist
input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
raise InvalidEdgeError(f"Edge already exists ({edge})")
raise InvalidEdgeError(
f"Edge to node {edge.destination.node_id} field {edge.destination.field} already exists"
)
# Validate that no cycles would be created
g = self.nx_graph_flat()
g.add_edge(edge.source.node_id, edge.destination.node_id)
if not nx.is_directed_acyclic_graph(g):
raise InvalidEdgeError(f"Edge creates a cycle in the graph ({edge})")
raise InvalidEdgeError(
f"Edge creates a cycle in the graph: {edge.source.node_id} -> {edge.destination.node_id}"
)
# Validate that the field types are compatible
if not are_connections_compatible(from_node, edge.source.field, to_node, edge.destination.field):
raise InvalidEdgeError(f"Field types are incompatible ({edge})")
raise InvalidEdgeError(
f"Fields are incompatible: cannot connect {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate if iterator output type matches iterator input type (if this edge results in both being set)
if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
err = self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source)
if err is not None:
raise InvalidEdgeError(f"Iterator input type does not match iterator output type ({edge}): {err}")
if not self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source):
raise InvalidEdgeError(
f"Iterator input type does not match iterator output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate if iterator input type matches output type (if this edge results in both being set)
if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
err = self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination)
if err is not None:
raise InvalidEdgeError(f"Iterator output type does not match iterator input type ({edge}): {err}")
if not self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination):
raise InvalidEdgeError(
f"Iterator output type does not match iterator input type:, {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate if collector input type matches output type (if this edge results in both being set)
if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
err = self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source)
if err is not None:
raise InvalidEdgeError(f"Collector output type does not match collector input type ({edge}): {err}")
if not self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source):
raise InvalidEdgeError(
f"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate that we are not connecting collector to iterator (currently unsupported)
if isinstance(from_node, CollectInvocation) and isinstance(to_node, IterateInvocation):
raise InvalidEdgeError(
f"Cannot connect collector to iterator: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
# Validate if collector output type matches input type (if this edge results in both being set) - skip if the destination field is not Any or list[Any]
if (
@@ -536,9 +548,10 @@ class Graph(BaseModel):
and not self._is_destination_field_list_of_Any(edge)
and not self._is_destination_field_Any(edge)
):
err = self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination)
if err is not None:
raise InvalidEdgeError(f"Collector input type does not match collector output type ({edge}): {err}")
if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination):
raise InvalidEdgeError(
f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
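Aside: the cycle guard in _validate_edge leans on networkx; a minimal, self-contained illustration of the same check:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("a", "b"), ("b", "c")])
assert nx.is_directed_acyclic_graph(g)

g.add_edge("c", "a")  # closing the loop introduces a cycle
assert not nx.is_directed_acyclic_graph(g)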
def has_node(self, node_id: str) -> bool:
"""Determines whether or not a node exists in the graph."""
@@ -621,7 +634,7 @@ class Graph(BaseModel):
node_id: str,
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> str | None:
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_id, "collection")]
outputs = [e.destination for e in self._get_output_edges(node_id, "item")]
@@ -632,47 +645,29 @@ class Graph(BaseModel):
# Only one input is allowed for iterators
if len(inputs) > 1:
return "Iterator may only have one input edge"
input_node = self.get_node(inputs[0].node_id)
return False
# Get input and output fields (the fields linked to the iterator's input/output)
input_field_type = get_output_field_type(input_node, inputs[0].field)
output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]
input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
# Input type must be a list
if get_origin(input_field_type) is not list:
return "Iterator input must be a collection"
if get_origin(input_field) is not list:
return False
# Validate that all outputs match the input type
input_field_item_type = get_args(input_field_type)[0]
if not all((are_connection_types_compatible(input_field_item_type, t) for t in output_field_types)):
return "Iterator outputs must connect to an input with a matching type"
input_field_item_type = get_args(input_field)[0]
if not all((are_connection_types_compatible(input_field_item_type, f) for f in output_fields)):
return False
# Collector input type must match all iterator output types
if isinstance(input_node, CollectInvocation):
# Traverse the graph to find the first collector input edge. Collectors validate that their collection
# inputs are all of the same type, so we can use the first input edge to determine the collector's type
first_collector_input_edge = self._get_input_edges(input_node.id, "item")[0]
first_collector_input_type = get_output_field_type(
self.get_node(first_collector_input_edge.source.node_id), first_collector_input_edge.source.field
)
resolved_collector_type = (
first_collector_input_type
if get_origin(first_collector_input_type) is None
else get_args(first_collector_input_type)
)
if not all((are_connection_types_compatible(resolved_collector_type, t) for t in output_field_types)):
return "Iterator collection type must match all iterator output types"
return None
return True
def _is_collector_connection_valid(
self,
node_id: str,
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> str | None:
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_id, "item")]
outputs = [e.destination for e in self._get_output_edges(node_id, "collection")]
@@ -682,42 +677,38 @@ class Graph(BaseModel):
outputs.append(new_output)
# Get input and output fields (the fields linked to the iterator's input/output)
input_field_types = [get_output_field_type(self.get_node(e.node_id), e.field) for e in inputs]
output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]
input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs]
output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
# Validate that all inputs are derived from or match a single type
input_field_types = {
resolved_type
for input_field_type in input_field_types
for resolved_type in (
[input_field_type] if get_origin(input_field_type) is None else get_args(input_field_type)
)
if resolved_type != NoneType
t
for input_field in input_fields
for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
if t != NoneType
} # Get unique types
type_tree = nx.DiGraph()
type_tree.add_nodes_from(input_field_types)
type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
type_degrees = type_tree.in_degree(type_tree.nodes)
if sum((t[1] == 0 for t in type_degrees)) != 1: # type: ignore
return "Collector input collection items must be of a single type"
return False # There is more than one root type
# Get the input root type
input_root_type = next(t[0] for t in type_degrees if t[1] == 0) # type: ignore
# Verify that all outputs are lists
if not all(is_list_or_contains_list(t) or is_any(t) for t in output_field_types):
return "Collector output must connect to a collection input"
if not all(is_list_or_contains_list(f) for f in output_fields):
return False
# Verify that all outputs match the input type (are a base class or the same class)
if not all(
is_any(t)
or is_union_subtype(input_root_type, get_args(t)[0])
or issubclass(input_root_type, get_args(t)[0])
for t in output_field_types
is_union_subtype(input_root_type, get_args(f)[0]) or issubclass(input_root_type, get_args(f)[0])
for f in output_fields
):
return "Collector outputs must connect to a collection input with a matching type"
return False
return None
return True
def nx_graph(self) -> nx.DiGraph:
"""Returns a NetworkX DiGraph representing the layout of this graph"""

View File

@@ -9,7 +9,6 @@ from torch import Tensor
from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import MetadataField, WithBoard, WithMetadata
from invokeai.app.services.board_records.board_records_common import BoardRecordOrderBy
from invokeai.app.services.boards.boards_common import BoardDTO
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
@@ -17,7 +16,6 @@ from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
AnyModel,
@@ -104,9 +102,7 @@ class BoardsInterface(InvocationContextInterface):
Returns:
A list of all boards.
"""
return self._services.boards.get_all(
order_by=BoardRecordOrderBy.CreatedAt, direction=SQLiteDirection.Descending
)
return self._services.boards.get_all()
def add_image_to_board(self, board_id: str, image_name: str) -> None:
"""Adds an image to a board.
@@ -126,11 +122,7 @@ class BoardsInterface(InvocationContextInterface):
Returns:
A list of all image names for the board.
"""
return self._services.board_images.get_all_board_image_names_for_board(
board_id,
categories=None,
is_intermediate=None,
)
return self._services.board_images.get_all_board_image_names_for_board(board_id)
class LoggerInterface(InvocationContextInterface):
@@ -291,7 +283,7 @@ class ImagesInterface(InvocationContextInterface):
Returns:
The local path of the image or thumbnail.
"""
return Path(self._services.images.get_path(image_name, thumbnail))
return self._services.images.get_path(image_name, thumbnail)
class TensorsInterface(InvocationContextInterface):

View File

@@ -1,4 +1,5 @@
import sqlite3
import threading
from logging import Logger
from pathlib import Path
@@ -37,20 +38,14 @@ class SqliteDatabase:
self.logger.info(f"Initializing database at {self.db_path}")
self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
self.lock = threading.RLock()
self.conn.row_factory = sqlite3.Row
if self.verbose:
self.conn.set_trace_callback(self.logger.debug)
# Enable foreign key constraints
self.conn.execute("PRAGMA foreign_keys = ON;")
# Enable Write-Ahead Logging (WAL) mode for better concurrency
self.conn.execute("PRAGMA journal_mode = WAL;")
# Set a busy timeout to prevent database lockups during writes
self.conn.execute("PRAGMA busy_timeout = 5000;") # 5 seconds
def clean(self) -> None:
"""
Cleans the database by running the VACUUM command, reporting on the freed space.
@@ -58,14 +53,15 @@ class SqliteDatabase:
# No need to clean in-memory database
if not self.db_path:
return
with self.lock:
try:
initial_db_size = Path(self.db_path).stat().st_size
self.conn.execute("VACUUM;")
self.conn.commit()
final_db_size = Path(self.db_path).stat().st_size
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
if freed_space_in_mb > 0:
self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
except Exception as e:
self.logger.error(f"Error cleaning database: {e}")
raise

View File

@@ -18,7 +18,6 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_12 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import build_migration_13
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import build_migration_16
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -54,7 +53,6 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_13())
migrator.register_migration(build_migration_14())
migrator.register_migration(build_migration_15())
migrator.register_migration(build_migration_16())
migrator.run_migrations()
return db

View File

@@ -1,31 +0,0 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration16Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._add_retried_from_item_id_col(cursor)
def _add_retried_from_item_id_col(self, cursor: sqlite3.Cursor) -> None:
"""
- Adds `retried_from_item_id` column to the session queue table.
"""
cursor.execute("ALTER TABLE session_queue ADD COLUMN retried_from_item_id INTEGER;")
def build_migration_16() -> Migration:
"""
Build the migration from database version 15 to 16.
This migration does the following:
- Adds `retried_from_item_id` column to the session queue table.
"""
migration_16 = Migration(
from_version=15,
to_version=16,
callback=Migration16Callback(),
)
return migration_16

View File

@@ -43,45 +43,46 @@ class SqliteMigrator:
def run_migrations(self) -> bool:
"""Migrates the database to the latest version."""
with self._db.lock:
# This throws if there is a problem.
self._migration_set.validate_migration_chain()
cursor = self._db.conn.cursor()
self._create_migrations_table(cursor=cursor)
if self._migration_set.count == 0:
self._logger.debug("No migrations registered")
return False
if self._get_current_version(cursor=cursor) == self._migration_set.latest_version:
self._logger.debug("Database is up to date, no migrations to run")
return False
self._logger.info("Database update needed")
# Make a backup of the db if it needs to be updated and is a file db
if self._db.db_path is not None:
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
self._logger.info(f"Backing up database to {str(self._backup_path)}")
# Use SQLite to do the backup
with closing(sqlite3.connect(self._backup_path)) as backup_conn:
self._db.conn.backup(backup_conn)
else:
self._logger.info("Using in-memory database, no backup needed")
next_migration = self._migration_set.get(from_version=self._get_current_version(cursor))
while next_migration is not None:
self._run_migration(next_migration)
next_migration = self._migration_set.get(self._get_current_version(cursor))
self._logger.info("Database updated successfully")
return True
def _run_migration(self, migration: Migration) -> None:
"""Runs a single migration."""
try:
# Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
# exception is raised.
with self._db.conn as conn:
with self._db.lock, self._db.conn as conn:
cursor = conn.cursor()
if self._get_current_version(cursor) != migration.from_version:
raise MigrationError(
@@ -107,26 +108,27 @@ class SqliteMigrator:
def _create_migrations_table(self, cursor: sqlite3.Cursor) -> None:
"""Creates the migrations table for the database, if one does not already exist."""
with self._db.lock:
try:
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='migrations';")
if cursor.fetchone() is not None:
return
cursor.execute(
"""--sql
CREATE TABLE migrations (
version INTEGER PRIMARY KEY,
migrated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
);
"""
)
cursor.execute("INSERT INTO migrations (version) VALUES (0);")
cursor.connection.commit()
self._logger.debug("Created migrations table")
except sqlite3.Error as e:
msg = f"Problem creating migrations table: {e}"
self._logger.error(msg)
cursor.connection.rollback()
raise MigrationError(msg) from e
@classmethod
def _get_current_version(cls, cursor: sqlite3.Cursor) -> int:

View File

@@ -17,7 +17,9 @@ from invokeai.app.util.misc import uuid_string
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._cursor = self._conn.cursor()
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
@@ -25,25 +27,31 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
def get(self, style_preset_id: str) -> StylePresetRecordDTO:
"""Gets a style preset by ID."""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM style_presets
WHERE id = ?;
""",
(style_preset_id,),
)
row = cursor.fetchone()
if row is None:
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
return StylePresetRecordDTO.from_dict(dict(row))
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
SELECT *
FROM style_presets
WHERE id = ?;
""",
(style_preset_id,),
)
row = self._cursor.fetchone()
if row is None:
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
return StylePresetRecordDTO.from_dict(dict(row))
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
style_preset_id = uuid_string()
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
INSERT OR IGNORE INTO style_presets (
id,
@@ -64,16 +72,18 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return self.get(style_preset_id)
def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
style_preset_ids = []
try:
cursor = self._conn.cursor()
self._lock.acquire()
for style_preset in style_presets:
style_preset_id = uuid_string()
style_preset_ids.append(style_preset_id)
cursor.execute(
self._cursor.execute(
"""--sql
INSERT OR IGNORE INTO style_presets (
id,
@@ -94,15 +104,17 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return None
def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
try:
cursor = self._conn.cursor()
self._lock.acquire()
# Change the name of a style preset
if changes.name is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE style_presets
SET name = ?
@@ -113,7 +125,7 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
# Change the preset data for a style preset
if changes.preset_data is not None:
cursor.execute(
self._cursor.execute(
"""--sql
UPDATE style_presets
SET preset_data = ?
@@ -126,12 +138,14 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return self.get(style_preset_id)
def delete(self, style_preset_id: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE from style_presets
WHERE id = ?;
@@ -142,38 +156,46 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return None
def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
main_query = """
SELECT
*
FROM style_presets
"""
try:
self._lock.acquire()
main_query = """
SELECT
*
FROM style_presets
"""
if type is not None:
main_query += "WHERE type = ? "
if type is not None:
main_query += "WHERE type = ? "
main_query += "ORDER BY LOWER(name) ASC"
main_query += "ORDER BY LOWER(name) ASC"
cursor = self._conn.cursor()
if type is not None:
cursor.execute(main_query, (type,))
else:
cursor.execute(main_query)
if type is not None:
self._cursor.execute(main_query, (type,))
else:
self._cursor.execute(main_query)
rows = cursor.fetchall()
style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
rows = self._cursor.fetchall()
style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
return style_presets
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
def _sync_default_style_presets(self) -> None:
"""Syncs default style presets to the database. Internal use only."""
# First delete all existing default style presets
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE FROM style_presets
WHERE type = "default";
@@ -183,8 +205,10 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
# Next, parse and create the default style presets
with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
with self._lock, open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
presets = json.load(file)
for preset in presets:
style_preset = StylePresetWithoutId.model_validate(preset)

View File

@@ -62,13 +62,9 @@ class WorkflowWithoutID(BaseModel):
notes: str = Field(description="The notes of the workflow.")
exposedFields: list[ExposedField] = Field(description="The exposed fields of the workflow.")
meta: WorkflowMeta = Field(description="The meta of the workflow.")
# TODO(psyche): nodes, edges and form are very loosely typed - they are strictly modeled and checked on the frontend.
# TODO: nodes and edges are very loosely typed
nodes: list[dict[str, JsonValue]] = Field(description="The nodes of the workflow.")
edges: list[dict[str, JsonValue]] = Field(description="The edges of the workflow.")
# TODO(psyche): We have a crapload of workflows that have no form, bc it was added after we introduced workflows.
# This is typed as optional to prevent errors when pulling workflows from the DB. The frontend adds a default form if
# it is None.
form: dict[str, JsonValue] | None = Field(default=None, description="The form of the workflow.")
model_config = ConfigDict(extra="ignore")

View File

@@ -23,7 +23,9 @@ from invokeai.app.util.misc import uuid_string
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._cursor = self._conn.cursor()
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
@@ -31,36 +33,42 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
def get(self, workflow_id: str) -> WorkflowRecordDTO:
"""Gets a workflow by ID. Updates the opened_at column."""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
UPDATE workflow_library
SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
WHERE workflow_id = ?;
""",
(workflow_id,),
)
self._conn.commit()
cursor.execute(
"""--sql
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
FROM workflow_library
WHERE workflow_id = ?;
""",
(workflow_id,),
)
row = cursor.fetchone()
if row is None:
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
return WorkflowRecordDTO.from_dict(dict(row))
try:
self._lock.acquire()
self._cursor.execute(
"""--sql
UPDATE workflow_library
SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
WHERE workflow_id = ?;
""",
(workflow_id,),
)
self._conn.commit()
self._cursor.execute(
"""--sql
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
FROM workflow_library
WHERE workflow_id = ?;
""",
(workflow_id,),
)
row = self._cursor.fetchone()
if row is None:
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
return WorkflowRecordDTO.from_dict(dict(row))
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
try:
# Only user workflows may be created by this method
assert workflow.meta.category is WorkflowCategory.User
workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
INSERT OR IGNORE INTO workflow_library (
workflow_id,
@@ -74,12 +82,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return self.get(workflow_with_id.id)
def update(self, workflow: Workflow) -> WorkflowRecordDTO:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
UPDATE workflow_library
SET workflow = ?
@@ -91,12 +101,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return self.get(workflow.id)
def delete(self, workflow_id: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
self._lock.acquire()
self._cursor.execute(
"""--sql
DELETE from workflow_library
WHERE workflow_id = ? AND category = 'user';
@@ -107,6 +119,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
return None
def get_many(
@@ -118,60 +132,66 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
per_page: Optional[int] = None,
query: Optional[str] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
try:
self._lock.acquire()
# sanitize!
assert order_by in WorkflowRecordOrderBy
assert direction in SQLiteDirection
assert category in WorkflowCategory
count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
main_query = """
SELECT
workflow_id,
category,
name,
description,
created_at,
updated_at,
opened_at
FROM workflow_library
WHERE category = ?
"""
main_params: list[int | str] = [category.value]
count_params: list[int | str] = [category.value]
stripped_query = query.strip() if query else None
if stripped_query:
wildcard_query = "%" + stripped_query + "%"
main_query += " AND name LIKE ? OR description LIKE ? "
count_query += " AND name LIKE ? OR description LIKE ?;"
main_params.extend([wildcard_query, wildcard_query])
count_params.extend([wildcard_query, wildcard_query])
main_query += f" ORDER BY {order_by.value} {direction.value}"
if per_page:
main_query += " LIMIT ? OFFSET ?"
main_params.extend([per_page, page * per_page])
cursor = self._conn.cursor()
cursor.execute(main_query, main_params)
rows = cursor.fetchall()
workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
self._cursor.execute(main_query, main_params)
rows = self._cursor.fetchall()
workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
cursor.execute(count_query, count_params)
total = cursor.fetchone()[0]
self._cursor.execute(count_query, count_params)
total = self._cursor.fetchone()[0]
if per_page:
pages = total // per_page + (total % per_page > 0)
else:
pages = 1 # If no pagination, there is only one page
return PaginatedResults(
items=workflows,
page=page,
per_page=per_page if per_page else total,
pages=pages,
total=total,
)
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()
def _sync_default_workflows(self) -> None:
"""Syncs default workflows to the database. Internal use only."""
@@ -187,6 +207,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
"""
try:
self._lock.acquire()
workflows: list[Workflow] = []
workflows_dir = Path(__file__).parent / Path("default_workflows")
workflow_paths = workflows_dir.glob("*.json")
@@ -197,15 +218,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
workflows.append(workflow)
# Only default workflows may be managed by this method
assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
cursor = self._conn.cursor()
cursor.execute(
self._cursor.execute(
"""--sql
DELETE FROM workflow_library
WHERE category = 'default';
"""
)
for w in workflows:
cursor.execute(
self._cursor.execute(
"""--sql
INSERT OR REPLACE INTO workflow_library (
workflow_id,
@@ -219,3 +239,5 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
except Exception:
self._conn.rollback()
raise
finally:
self._lock.release()

View File

@@ -1,64 +0,0 @@
import logging
import mimetypes
import socket
import torch
def find_open_port(port: int) -> int:
"""Find a port not in use starting at given port"""
# Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
# https://github.com/WaylonWalker
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(1)
if s.connect_ex(("localhost", port)) == 0:
return find_open_port(port=port + 1)
else:
return port
def check_cudnn(logger: logging.Logger) -> None:
"""Check for cuDNN issues that could be causing degraded performance."""
if torch.backends.cudnn.is_available():
try:
# Note: At the time of writing (torch 2.2.1), torch.backends.cudnn.version() only raises an error the first
# time it is called. Subsequent calls will return the version number without complaining about a mismatch.
cudnn_version = torch.backends.cudnn.version()
logger.info(f"cuDNN version: {cudnn_version}")
except RuntimeError as e:
logger.warning(
"Encountered a cuDNN version issue. This may result in degraded performance. This issue is usually "
"caused by an incompatible cuDNN version installed in your python environment, or on the host "
f"system. Full error message:\n{e}"
)
def enable_dev_reload() -> None:
"""Enable hot reloading on python file changes during development."""
from invokeai.backend.util.logging import InvokeAILogger
try:
import jurigged
except ImportError as e:
raise RuntimeError(
'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.'
) from e
else:
jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)
def apply_monkeypatches() -> None:
"""Apply monkeypatches to fix issues with third-party libraries."""
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
if torch.backends.mps.is_available():
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
def register_mime_types() -> None:
"""Register additional mime types for windows."""
# Fix for windows mimetypes registry entries being borked.
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")

View File

@@ -1,52 +0,0 @@
import logging
import os
import sys
def configure_torch_cuda_allocator(pytorch_cuda_alloc_conf: str, logger: logging.Logger):
"""Configure the PyTorch CUDA memory allocator. See
https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf for supported
configurations.
"""
if "torch" in sys.modules:
raise RuntimeError("configure_torch_cuda_allocator() must be called before importing torch.")
# Log a warning if the PYTORCH_CUDA_ALLOC_CONF environment variable is already set.
prev_cuda_alloc_conf = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", None)
if prev_cuda_alloc_conf is not None:
if prev_cuda_alloc_conf == pytorch_cuda_alloc_conf:
logger.info(
f"PYTORCH_CUDA_ALLOC_CONF is already set to '{pytorch_cuda_alloc_conf}'. Skipping configuration."
)
return
else:
logger.warning(
f"Attempted to configure the PyTorch CUDA memory allocator with '{pytorch_cuda_alloc_conf}', but PYTORCH_CUDA_ALLOC_CONF is already set to "
f"'{prev_cuda_alloc_conf}'. Skipping configuration."
)
return
# Configure the PyTorch CUDA memory allocator.
# NOTE: It is important that this happens before torch is imported.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = pytorch_cuda_alloc_conf
import torch
# Relevant docs: https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf
if not torch.cuda.is_available():
raise RuntimeError(
"Attempted to configure the PyTorch CUDA memory allocator, but no CUDA devices are available."
)
# Verify that the torch allocator was properly configured.
allocator_backend = torch.cuda.get_allocator_backend()
expected_backend = "cudaMallocAsync" if "cudaMallocAsync" in pytorch_cuda_alloc_conf else "native"
if allocator_backend != expected_backend:
raise RuntimeError(
f"Failed to configure the PyTorch CUDA memory allocator. Expected backend: '{expected_backend}', but got "
f"'{allocator_backend}'. Verify that 1) the pytorch_cuda_alloc_conf is set correctly, and 2) that torch is "
"not imported before calling configure_torch_cuda_allocator()."
)
logger.info(f"PyTorch CUDA memory allocator: {torch.cuda.get_allocator_backend()}")

View File

@@ -1,10 +1,8 @@
import gc
import logging
import threading
import time
from functools import wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional
from typing import Dict, List, Optional
import psutil
import torch
@@ -43,17 +41,6 @@ def get_model_cache_key(model_key: str, submodel_type: Optional[SubModelType] =
return model_key
def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
"""A decorator that applies the class's self._lock to the method."""
@wraps(method)
def wrapper(self, *args, **kwargs):
with self._lock: # Automatically acquire and release the lock
return method(self, *args, **kwargs)
return wrapper
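Aside: a hypothetical usage sketch for the removed synchronized decorator; any instance with a _lock attribute can guard its methods with it:

import threading

class Counter:
    def __init__(self) -> None:
        self._lock = threading.RLock()
        self._value = 0

    @synchronized  # every call runs under self._lock
    def increment(self) -> int:
        self._value += 1
        return self._value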
class ModelCache:
"""A cache for managing models in memory.
@@ -138,25 +125,16 @@ class ModelCache:
self._ram_cache_size_bytes = self._calc_ram_available_to_model_cache()
# A lock applied to all public method calls to make the ModelCache thread-safe.
# At the time of writing, the ModelCache should only be accessed from two threads:
# - The graph execution thread
# - Requests to empty the cache from a separate thread
self._lock = threading.RLock()
@property
@synchronized
def stats(self) -> Optional[CacheStats]:
"""Return collected CacheStats object."""
return self._stats
@stats.setter
@synchronized
def stats(self, stats: CacheStats) -> None:
"""Set the CacheStats object for collecting cache statistics."""
self._stats = stats
@synchronized
def put(self, key: str, model: AnyModel) -> None:
"""Add a model to the cache."""
if key in self._cached_models:
@@ -195,7 +173,6 @@ class ModelCache:
f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
)
@synchronized
def get(self, key: str, stats_name: Optional[str] = None) -> CacheRecord:
"""Retrieve a model from the cache.
@@ -231,7 +208,6 @@ class ModelCache:
self._logger.debug(f"Cache hit: {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")
return cache_entry
@synchronized
def lock(self, cache_entry: CacheRecord, working_mem_bytes: Optional[int]) -> None:
"""Lock a model for use and move it into VRAM."""
if cache_entry.key not in self._cached_models:
@@ -267,7 +243,6 @@ class ModelCache:
self._log_cache_state()
@synchronized
def unlock(self, cache_entry: CacheRecord) -> None:
"""Unlock a model."""
if cache_entry.key not in self._cached_models:
@@ -425,19 +400,23 @@ class ModelCache:
# Heuristics for dynamically calculating the RAM cache size, **in order of increasing priority**:
# 1. As an initial default, use 50% of the total RAM for InvokeAI.
# - Assume a 2GB baseline for InvokeAI's non-model RAM usage, and use the rest of the RAM for the model cache.
# 2. On a system with a lot of RAM, users probably don't want InvokeAI to eat up too much RAM.
# There are diminishing returns to storing more and more models. So, we apply an upper bound. (Keep in mind
# that most OSes have some amount of disk caching, which we still benefit from if there is excess memory,
# even if we drop models from the cache.)
# 2. On a system with a lot of RAM (e.g. 64GB+), users probably don't want InvokeAI to eat up too much RAM.
# There are diminishing returns to storing more and more models. So, we apply an upper bound.
# - On systems without a CUDA device, the upper bound is 32GB.
# - On systems with a CUDA device, the upper bound is 1x the amount of VRAM (less the working memory).
# 3. Absolute minimum of 4GB.
# - On systems with a CUDA device, the upper bound is 2x the amount of VRAM.
# 3. On systems with a CUDA device, the minimum should be the VRAM size (less the working memory).
# - Setting lower than this would mean that we sometimes kick models out of the cache when there is room for
# all models in VRAM.
# - Consider an extreme case of a system with 8GB RAM / 24GB VRAM. I haven't tested this, but I think
# you'd still want the RAM cache size to be ~24GB (less the working memory). (Though you'd probably want to
# set `keep_ram_copy_of_weights: false` in this case.)
# 4. Absolute minimum of 4GB.
# NOTE(ryand): We explored dynamically adjusting the RAM cache size based on memory pressure (using psutil), but
# decided against it for now, for the following reasons:
# - It was surprisingly difficult to get memory metrics with consistent definitions across OSes. (If you go
# down this path again, don't underestimate the amount of complexity here and be sure to test rigorously on all
# OSes.)
# - Making the RAM cache size dynamic opens the door for performance regressions that are hard to diagnose and
# hard for users to understand. It is better for users to see that their RAM is maxed out, and then override
# the default value if desired.
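Aside: a minimal sketch of the updated heuristic cascade described above (hypothetical function and working-memory default; the real implementation also honors the max_vram_cache_size override):

GB = 2**30

def calc_ram_cache_size_bytes(total_ram: int, cuda_vram: int | None, working_mem: int = 3 * GB) -> int:
    # 1. Default: half of total RAM, less a ~2GB baseline for non-model usage.
    size = total_ram // 2 - 2 * GB
    # 2. Upper bound: 32GB without a CUDA device, else 2x the VRAM size.
    size = min(size, 2 * cuda_vram if cuda_vram is not None else 32 * GB)
    # 3. On CUDA systems, never drop below VRAM less the working memory.
    if cuda_vram is not None:
        size = max(size, cuda_vram - working_mem)
    # 4. Absolute minimum of 4GB.
    return max(size, 4 * GB)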
@@ -459,18 +438,26 @@ class ModelCache:
# ------------------
max_ram_cache_size_bytes = 32 * GB
if total_cuda_vram_bytes is not None:
if self._max_vram_cache_size_gb is not None:
max_ram_cache_size_bytes = int(self._max_vram_cache_size_gb * GB)
else:
max_ram_cache_size_bytes = total_cuda_vram_bytes - int(self._execution_device_working_mem_gb * GB)
max_ram_cache_size_bytes = 2 * total_cuda_vram_bytes
if ram_available_to_model_cache > max_ram_cache_size_bytes:
heuristics_applied.append(2)
ram_available_to_model_cache = max_ram_cache_size_bytes
# Apply heuristic 3.
# ------------------
if total_cuda_vram_bytes is not None:
if self._max_vram_cache_size_gb is not None:
min_ram_cache_size_bytes = int(self._max_vram_cache_size_gb * GB)
else:
min_ram_cache_size_bytes = total_cuda_vram_bytes - int(self._execution_device_working_mem_gb * GB)
if ram_available_to_model_cache < min_ram_cache_size_bytes:
heuristics_applied.append(3)
ram_available_to_model_cache = min_ram_cache_size_bytes
# Apply heuristic 4.
# ------------------
if ram_available_to_model_cache < 4 * GB:
heuristics_applied.append(3)
heuristics_applied.append(4)
ram_available_to_model_cache = 4 * GB
self._logger.info(
@@ -601,7 +588,6 @@ class ModelCache:
self._logger.debug(log)
@synchronized
def make_room(self, bytes_needed: int) -> None:
"""Make enough room in the cache to accommodate a new model of indicated size.

View File

@@ -7,6 +7,7 @@ from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custo
CustomModuleMixin,
)
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
@@ -21,6 +22,25 @@ def linear_lora_forward(input: torch.Tensor, lora_layer: LoRALayer, lora_weight:
return x
def concatenated_lora_forward(
input: torch.Tensor, concatenated_lora_layer: ConcatenatedLoRALayer, lora_weight: float
) -> torch.Tensor:
"""An optimized implementation of the residual calculation for a sidecar ConcatenatedLoRALayer."""
x_chunks: list[torch.Tensor] = []
for lora_layer in concatenated_lora_layer.lora_layers:
x_chunk = torch.nn.functional.linear(input, lora_layer.down)
if lora_layer.mid is not None:
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.mid)
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.up, bias=lora_layer.bias)
x_chunk *= lora_weight * lora_layer.scale()
x_chunks.append(x_chunk)
# TODO(ryand): Generalize to support concat_axis != 0.
assert concatenated_lora_layer.concat_axis == 0
x = torch.cat(x_chunks, dim=-1)
return x
def autocast_linear_forward_sidecar_patches(
orig_module: torch.nn.Linear, input: torch.Tensor, patches_and_weights: list[tuple[BaseLayerPatch, float]]
) -> torch.Tensor:
@@ -46,6 +66,8 @@ def autocast_linear_forward_sidecar_patches(
output += linear_lora_forward(orig_input, patch, patch_weight)
elif isinstance(patch, LoRALayer):
output += linear_lora_forward(input, patch, patch_weight)
elif isinstance(patch, ConcatenatedLoRALayer):
output += concatenated_lora_forward(input, patch, patch_weight)
else:
unprocessed_patches_and_weights.append((patch, patch_weight))

View File

@@ -3,8 +3,6 @@ import copy
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
class CustomModuleMixin:
@@ -44,20 +42,6 @@ class CustomModuleMixin:
device: torch.device | None = None,
):
"""Helper function that aggregates the parameters from all patches into a single dict."""
# HACK(ryand): If the original parameters are in a quantized format whose weights can't be accessed, we replace
# them with dummy tensors on the 'meta' device. This allows patch layers to access the shapes of the original
# parameters. But, of course, any sub-layers that need to access the actual values of the parameters will fail.
for param_name in orig_params.keys():
param = orig_params[param_name]
if type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
pass
elif type(param) is GGMLTensor:
# Move to device and dequantize here. Doing it in the patch layer can result in redundant casts /
# dequantizations.
orig_params[param_name] = param.to(device=device).get_dequantized_tensor()
else:
orig_params[param_name] = torch.empty(get_param_shape(param), device="meta")
params: dict[str, torch.Tensor] = {}
for patch, patch_weight in patches_and_weights:

View File

@@ -219,9 +219,6 @@ class FluxCheckpointModel(ModelLoader):
assert isinstance(config, MainCheckpointConfig)
model_path = Path(config.path)
with accelerate.init_empty_weights():
model = Flux(params[config.config_path])
sd = load_file(model_path)
if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
sd = convert_bundle_to_flux_transformer_checkpoint(sd)
@@ -230,6 +227,11 @@ class FluxCheckpointModel(ModelLoader):
for k in sd.keys():
# We need to cast to bfloat16 due to it being the only currently supported dtype for inference
sd[k] = sd[k].to(torch.bfloat16)
flux_params = infer_flux_params_from_state_dict(sd)
with accelerate.init_empty_weights():
model = Flux(flux_params)
model.load_state_dict(sd, assign=True)
return model

View File

@@ -31,10 +31,6 @@ from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils
is_state_dict_likely_in_flux_kohya_format,
lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_utils import (
is_state_dict_likely_in_flux_onetrainer_format,
lora_model_from_flux_onetrainer_state_dict,
)
from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
@@ -88,12 +84,8 @@ class LoRALoader(ModelLoader):
elif config.format == ModelFormat.LyCORIS:
if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
elif is_state_dict_likely_in_flux_onetrainer_format(state_dict=state_dict):
model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
elif is_state_dict_likely_flux_control(state_dict=state_dict):
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:

View File

@@ -46,9 +46,6 @@ from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_ut
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
is_state_dict_likely_in_flux_kohya_format,
)
from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_utils import (
is_state_dict_likely_in_flux_onetrainer_format,
)
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
@@ -286,7 +283,7 @@ class ModelProbe(object):
return ModelType.Main
elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
return ModelType.VAE
elif key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")):
elif key.startswith(("lora_te_", "lora_unet_")):
return ModelType.LoRA
# "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT
# LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
@@ -635,7 +632,6 @@ class LoRACheckpointProbe(CheckpointProbeBase):
def get_base_type(self) -> BaseModelType:
if (
is_state_dict_likely_in_flux_kohya_format(self.checkpoint)
or is_state_dict_likely_in_flux_onetrainer_format(self.checkpoint)
or is_state_dict_likely_in_flux_diffusers_format(self.checkpoint)
or is_state_dict_likely_flux_control(self.checkpoint)
):

View File

@@ -0,0 +1,55 @@
from typing import Optional, Sequence
import torch
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
class ConcatenatedLoRALayer(LoRALayerBase):
"""A LoRA layer that is composed of multiple LoRA layers concatenated along a specified axis.
This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
"""
def __init__(self, lora_layers: Sequence[LoRALayer], concat_axis: int = 0):
super().__init__(alpha=None, bias=None)
self.lora_layers = lora_layers
self.concat_axis = concat_axis
def _rank(self) -> int | None:
return None
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
# TODO(ryand): Currently, we pass orig_weight=None to the sub-layers. If we want to support sub-layers that
# require this value, we will need to implement chunking of the original weight tensor here.
# Note that we must apply the sub-layer scales here.
layer_weights = [lora_layer.get_weight(None) * lora_layer.scale() for lora_layer in self.lora_layers] # pyright: ignore[reportArgumentType]
return torch.cat(layer_weights, dim=self.concat_axis)
def get_bias(self, orig_bias: torch.Tensor | None) -> Optional[torch.Tensor]:
# TODO(ryand): Currently, we pass orig_bias=None to the sub-layers. If we want to support sub-layers that
# require this value, we will need to implement chunking of the original bias tensor here.
# Note that we must apply the sub-layer scales here.
layer_biases: list[torch.Tensor] = []
for lora_layer in self.lora_layers:
layer_bias = lora_layer.get_bias(None)
if layer_bias is not None:
layer_biases.append(layer_bias * lora_layer.scale())
if len(layer_biases) == 0:
return None
assert len(layer_biases) == len(self.lora_layers)
return torch.cat(layer_biases, dim=self.concat_axis)
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
super().to(device=device, dtype=dtype)
for lora_layer in self.lora_layers:
lora_layer.to(device=device, dtype=dtype)
def calc_size(self) -> int:
return super().calc_size() + sum(lora_layer.calc_size() for lora_layer in self.lora_layers)
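A self-contained sketch (toy shapes assumed) of why concatenating per-matrix LoRA deltas along dim 0 reproduces the delta for a fused QKV weight, which is what get_weight above relies on:

import torch

hidden, rank = 8, 2
# Separate diffusers-style LoRA factors for Q, K and V.
downs = [torch.randn(rank, hidden) for _ in range(3)]
ups = [torch.randn(hidden, rank) for _ in range(3)]

# Per-matrix deltas, as each sub-LoRALayer computes them.
deltas = [up @ down for up, down in zip(ups, downs)]

# The BFL fused QKV weight stacks Q, K, V along dim 0, so the fused delta is
# simply the concatenation of the per-matrix deltas along that axis.
fused_delta = torch.cat(deltas, dim=0)
assert fused_delta.shape == (3 * hidden, hidden)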

View File

@@ -1,115 +0,0 @@
from typing import Dict, Optional
import torch
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class DoRALayer(LoRALayerBase):
"""A DoRA layer. As defined in https://arxiv.org/pdf/2402.09353."""
def __init__(
self,
up: torch.Tensor,
down: torch.Tensor,
dora_scale: torch.Tensor,
alpha: float | None,
bias: Optional[torch.Tensor],
):
super().__init__(alpha, bias)
self.up = up
self.down = down
self.dora_scale = dora_scale
@classmethod
def from_state_dict_values(cls, values: Dict[str, torch.Tensor]):
alpha = cls._parse_alpha(values.get("alpha", None))
bias = cls._parse_bias(
values.get("bias_indices", None), values.get("bias_values", None), values.get("bias_size", None)
)
layer = cls(
up=values["lora_up.weight"],
down=values["lora_down.weight"],
dora_scale=values["dora_scale"],
alpha=alpha,
bias=bias,
)
cls.warn_on_unhandled_keys(
values=values,
handled_keys={
# Default keys.
"alpha",
"bias_indices",
"bias_values",
"bias_size",
# Layer-specific keys.
"lora_up.weight",
"lora_down.weight",
"dora_scale",
},
)
return layer
def _rank(self) -> int:
return self.down.shape[0]
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
orig_weight = cast_to_device(orig_weight, self.up.device)
# Note: Variable names (e.g. delta_v) are based on the paper.
delta_v = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)
delta_v = delta_v.reshape(orig_weight.shape)
delta_v = delta_v * self.scale()
# At this point, out_weight is the unnormalized direction matrix.
out_weight = orig_weight + delta_v
# TODO(ryand): Simplify this logic.
direction_norm = (
out_weight.transpose(0, 1)
.reshape(out_weight.shape[1], -1)
.norm(dim=1, keepdim=True)
.reshape(out_weight.shape[1], *[1] * (out_weight.dim() - 1))
.transpose(0, 1)
)
out_weight *= self.dora_scale / direction_norm
return out_weight - orig_weight
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
super().to(device=device, dtype=dtype)
self.up = self.up.to(device=device, dtype=dtype)
self.down = self.down.to(device=device, dtype=dtype)
self.dora_scale = self.dora_scale.to(device=device, dtype=dtype)
def calc_size(self) -> int:
return super().calc_size() + calc_tensors_size([self.up, self.down, self.dora_scale])
def get_parameters(self, orig_parameters: dict[str, torch.Tensor], weight: float) -> dict[str, torch.Tensor]:
if any(p.device.type == "meta" for p in orig_parameters.values()):
# If any of the original parameters are on the 'meta' device, we assume this is because the base model is in
# a quantization format that doesn't allow easy dequantization.
raise RuntimeError(
"The base model quantization format (likely bitsandbytes) is not compatible with DoRA patches."
)
scale = self.scale()
params = {"weight": self.get_weight(orig_parameters["weight"]) * weight}
bias = self.get_bias(orig_parameters.get("bias", None))
if bias is not None:
params["bias"] = bias * (weight * scale)
# Reshape all params to match the original module's shape.
for param_name, param_weight in params.items():
orig_param = orig_parameters[param_name]
if param_weight.shape != orig_param.shape:
params[param_name] = param_weight.reshape(orig_param.shape)
return params
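A minimal sketch of the update the deleted get_weight computed, restricted to a plain 2-D linear weight (shapes assumed; variable names follow the paper): the direction matrix W + delta_v is re-scaled column-wise to the learned magnitude dora_scale, and the patch is the difference from the original weight.

import torch

out_f, in_f, rank = 6, 4, 2
w = torch.randn(out_f, in_f)
up, down = torch.randn(out_f, rank), torch.randn(rank, in_f)
dora_scale = torch.randn(1, in_f)

delta_v = up @ down                      # low-rank direction update
v = w + delta_v                          # unnormalized direction matrix
col_norm = v.norm(dim=0, keepdim=True)   # per-column norms, shape (1, in_f)
w_prime = v * (dora_scale / col_norm)    # re-scale columns to the magnitudes
patch = w_prime - w                      # what get_weight() returned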

View File

@@ -4,7 +4,6 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
@@ -68,8 +67,8 @@ class LoRALayerBase(BaseLayerPatch):
# Reshape all params to match the original module's shape.
for param_name, param_weight in params.items():
orig_param = orig_parameters[param_name]
if param_weight.shape != get_param_shape(orig_param):
params[param_name] = param_weight.reshape(get_param_shape(orig_param))
if param_weight.shape != orig_param.shape:
params[param_name] = param_weight.reshape(orig_param.shape)
return params

View File

@@ -1,65 +0,0 @@
from dataclasses import dataclass
from typing import Sequence
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
@dataclass
class Range:
start: int
end: int
class MergedLayerPatch(BaseLayerPatch):
"""A patch layer that is composed of multiple sub-layers merged together.
This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
"""
def __init__(
self,
lora_layers: Sequence[BaseLayerPatch],
ranges: Sequence[Range],
):
super().__init__()
self.lora_layers = lora_layers
# self.ranges[i] is the range for the i'th lora layer along the 0'th weight dimension.
self.ranges = ranges
assert len(self.ranges) == len(self.lora_layers)
def get_parameters(self, orig_parameters: dict[str, torch.Tensor], weight: float) -> dict[str, torch.Tensor]:
out_parameters: dict[str, torch.Tensor] = {}
for lora_layer, range in zip(self.lora_layers, self.ranges, strict=True):
sliced_parameters: dict[str, torch.Tensor] = {
n: p[range.start : range.end] for n, p in orig_parameters.items()
}
# Note that `weight` is applied in the sub-layers, no need to apply it in this function.
layer_out_parameters = lora_layer.get_parameters(sliced_parameters, weight)
for out_param_name, out_param in layer_out_parameters.items():
if out_param_name not in out_parameters:
# If not already in the output dict, initialize an output tensor with the same shape as the full
# original parameter.
out_parameters[out_param_name] = torch.zeros(
get_param_shape(orig_parameters[out_param_name]),
dtype=out_param.dtype,
device=out_param.device,
)
out_parameters[out_param_name][range.start : range.end] += out_param
return out_parameters
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
for lora_layer in self.lora_layers:
lora_layer.to(device=device, dtype=dtype)
def calc_size(self) -> int:
return sum(lora_layer.calc_size() for lora_layer in self.lora_layers)
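A minimal sketch (toy shapes assumed) of the range bookkeeping above: each sub-patch writes only into its slice of dim 0 of an output tensor shaped like the fused parameter.

import torch

fused = torch.zeros(9, 4)                # e.g. a fused QKV weight delta
ranges = [(0, 3), (3, 6), (6, 9)]        # Q, K, V slices along dim 0
sub_deltas = [torch.randn(3, 4) for _ in ranges]

for (start, end), delta in zip(ranges, sub_deltas):
    fused[start:end] += delta            # accumulate into the sub-layer's slice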

View File

@@ -1,19 +0,0 @@
import torch
try:
from bitsandbytes.nn.modules import Params4bit
bnb_available: bool = True
except ImportError:
bnb_available: bool = False
def get_param_shape(param: torch.Tensor) -> torch.Size:
"""A helper function to get the shape of a parameter that handles `bitsandbytes.nn.Params4Bit` correctly."""
# Accessing the `.shape` attribute of `bitsandbytes.nn.Params4Bit` will return an incorrect result. Instead, we must
# access the `.quant_state.shape` attribute.
if bnb_available and type(param) is Params4bit: # type: ignore
quant_state = param.quant_state
if quant_state is not None:
return quant_state.shape
return param.shape
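A hedged usage sketch (plain-tensor case only): for regular parameters the helper is just .shape; the bitsandbytes branch matters because a Params4bit reports the shape of its packed 4-bit storage rather than the logical weight shape.

import torch

w = torch.nn.Linear(4, 2).weight
assert get_param_shape(w) == torch.Size([2, 4])  # falls through to param.shape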

View File

@@ -3,7 +3,6 @@ from typing import Dict
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.dora_layer import DoRALayer
from invokeai.backend.patches.layers.full_layer import FullLayer
from invokeai.backend.patches.layers.ia3_layer import IA3Layer
from invokeai.backend.patches.layers.loha_layer import LoHALayer
@@ -15,9 +14,8 @@ from invokeai.backend.patches.layers.norm_layer import NormLayer
def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> BaseLayerPatch:
# Detect layers according to LyCORIS detection logic (`weight_list_det`)
# https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules
if "dora_scale" in state_dict:
return DoRALayer.from_state_dict_values(state_dict)
elif "lora_up.weight" in state_dict:
if "lora_up.weight" in state_dict:
# LoRA a.k.a LoCon
return LoRALayer.from_state_dict_values(state_dict)
elif "hada_w1_a" in state_dict:

View File

@@ -3,8 +3,8 @@ from typing import Dict
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -33,21 +33,13 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
def lora_model_from_flux_diffusers_state_dict(
state_dict: Dict[str, torch.Tensor], alpha: float | None
) -> ModelPatchRaw:
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)
layers = lora_layers_from_flux_diffusers_grouped_state_dict(grouped_state_dict, alpha)
return ModelPatchRaw(layers=layers)
def lora_layers_from_flux_diffusers_grouped_state_dict(
grouped_state_dict: Dict[str, Dict[str, torch.Tensor]], alpha: float | None
) -> dict[str, BaseLayerPatch]:
"""Converts a grouped state dict with Diffusers FLUX LoRA keys to LoRA layers with BFL keys (i.e. the module key
format used by Invoke).
"""Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.
This function is based on:
https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
"""
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
@@ -61,26 +53,17 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
layers: dict[str, BaseLayerPatch] = {}
def get_lora_layer_values(src_layer_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
if "lora_A.weight" in src_layer_dict:
# The LoRA keys are in PEFT format.
values = {
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
if src_key in grouped_state_dict:
src_layer_dict = grouped_state_dict.pop(src_key)
value = {
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
}
if alpha is not None:
values["alpha"] = torch.tensor(alpha)
value["alpha"] = torch.tensor(alpha)
layers[dst_key] = LoRALayer.from_state_dict_values(values=value)
assert len(src_layer_dict) == 0
return values
else:
# Assume that the LoRA keys are in Kohya format.
return src_layer_dict
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
if src_key in grouped_state_dict:
src_layer_dict = grouped_state_dict.pop(src_key)
values = get_lora_layer_values(src_layer_dict)
layers[dst_key] = any_lora_layer_from_state_dict(values)
def add_qkv_lora_layer_if_present(
src_keys: list[str],
@@ -96,24 +79,29 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
if not any(keys_present):
return
dim_0_offset = 0
sub_layers: list[BaseLayerPatch] = []
sub_layer_ranges: list[Range] = []
sub_layers: list[LoRALayer] = []
for src_key, src_weight_shape in zip(src_keys, src_weight_shapes, strict=True):
src_layer_dict = grouped_state_dict.pop(src_key, None)
if src_layer_dict is not None:
values = get_lora_layer_values(src_layer_dict)
# assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
# assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
sub_layers.append(any_lora_layer_from_state_dict(values))
sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + src_weight_shape[0]))
values = {
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
}
if alpha is not None:
values["alpha"] = torch.tensor(alpha)
assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
sub_layers.append(LoRALayer.from_state_dict_values(values=values))
assert len(src_layer_dict) == 0
else:
if not allow_missing_keys:
raise ValueError(f"Missing LoRA layer: '{src_key}'.")
dim_0_offset += src_weight_shape[0]
layers[dst_qkv_key] = MergedLayerPatch(sub_layers, sub_layer_ranges)
values = {
"lora_up.weight": torch.zeros((src_weight_shape[0], 1)),
"lora_down.weight": torch.zeros((1, src_weight_shape[1])),
}
sub_layers.append(LoRALayer.from_state_dict_values(values=values))
layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers)
# time_text_embed.timestep_embedder -> time_in.
add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
@@ -229,7 +217,7 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
layers_with_prefix = {f"{FLUX_LORA_TRANSFORMER_PREFIX}{k}": v for k, v in layers.items()}
return layers_with_prefix
return ModelPatchRaw(layers=layers_with_prefix)
def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
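A minimal sketch (toy tensors assumed) of the PEFT-to-internal key mapping used above: PEFT's lora_A is the down-projection and lora_B the up-projection.

import torch

src_layer_dict = {
    "lora_A.weight": torch.randn(2, 8),  # down: (rank, in_features)
    "lora_B.weight": torch.randn(8, 2),  # up:   (out_features, rank)
}
values = {
    "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
    "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
}
assert not src_layer_dict  # every source key was consumed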

View File

@@ -7,7 +7,6 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_lora_constants import (
FLUX_LORA_CLIP_PREFIX,
FLUX_LORA_T5_PREFIX,
FLUX_LORA_TRANSFORMER_PREFIX,
)
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -27,14 +26,6 @@ FLUX_KOHYA_TRANSFORMER_KEY_REGEX = (
# lora_te1_text_model_encoder_layers_0_mlp_fc1.lora_up.weight
FLUX_KOHYA_CLIP_KEY_REGEX = r"lora_te1_text_model_encoder_layers_(\d+)_(mlp|self_attn)_(\w+)\.?.*"
# A regex pattern that matches all of the T5 keys in the Kohya FLUX LoRA format.
# Example keys:
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.alpha
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.dora_scale
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.lora_down.weight
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.lora_up.weight
FLUX_KOHYA_T5_KEY_REGEX = r"lora_te2_encoder_block_(\d+)_layer_(\d+)_(DenseReluDense|SelfAttention)_(\w+)_?(\w+)?\.?.*"
def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> bool:
"""Checks if the provided state dict is likely in the Kohya FLUX LoRA format.
@@ -43,9 +34,7 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
"""
return all(
re.match(FLUX_KOHYA_TRANSFORMER_KEY_REGEX, k)
or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
or re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
re.match(FLUX_KOHYA_TRANSFORMER_KEY_REGEX, k) or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
for k in state_dict.keys()
)
@@ -59,34 +48,27 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
grouped_state_dict[layer_name] = {}
grouped_state_dict[layer_name][param_name] = value
# Split the grouped state dict into transformer, CLIP, and T5 state dicts.
# Split the grouped state dict into transformer and CLIP state dicts.
transformer_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
clip_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
t5_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
for layer_name, layer_state_dict in grouped_state_dict.items():
if layer_name.startswith("lora_unet"):
transformer_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te1"):
clip_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te2"):
t5_grouped_sd[layer_name] = layer_state_dict
else:
raise ValueError(f"Layer '{layer_name}' does not match the expected pattern for FLUX LoRA weights.")
# Convert the state dicts to the InvokeAI format.
transformer_grouped_sd = _convert_flux_transformer_kohya_state_dict_to_invoke_format(transformer_grouped_sd)
clip_grouped_sd = _convert_flux_clip_kohya_state_dict_to_invoke_format(clip_grouped_sd)
t5_grouped_sd = _convert_flux_t5_kohya_state_dict_to_invoke_format(t5_grouped_sd)
# Create LoRA layers.
layers: dict[str, BaseLayerPatch] = {}
for model_prefix, grouped_sd in [
(FLUX_LORA_TRANSFORMER_PREFIX, transformer_grouped_sd),
(FLUX_LORA_CLIP_PREFIX, clip_grouped_sd),
(FLUX_LORA_T5_PREFIX, t5_grouped_sd),
]:
for layer_key, layer_state_dict in grouped_sd.items():
layers[model_prefix + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
for layer_key, layer_state_dict in transformer_grouped_sd.items():
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
for layer_key, layer_state_dict in clip_grouped_sd.items():
layers[FLUX_LORA_CLIP_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
# Create and return the LoRAModelRaw.
return ModelPatchRaw(layers=layers)
@@ -141,31 +123,3 @@ def _convert_flux_transformer_kohya_state_dict_to_invoke_format(state_dict: Dict
raise ValueError(f"Key '{k}' does not match the expected pattern for FLUX LoRA weights.")
return converted_dict
def _convert_flux_t5_kohya_state_dict_to_invoke_format(state_dict: Dict[str, T]) -> Dict[str, T]:
"""Converts a T5 LoRA state dict from the Kohya FLUX LoRA format to LoRA weight format used internally by
InvokeAI.
Example key conversions:
"lora_te2_encoder_block_0_layer_0_SelfAttention_k" -> "encoder.block.0.layer.0.SelfAttention.k"
"lora_te2_encoder_block_0_layer_1_DenseReluDense_wi_0" -> "encoder.block.0.layer.1.DenseReluDense.wi.0"
"""
def replace_func(match: re.Match[str]) -> str:
s = f"encoder.block.{match.group(1)}.layer.{match.group(2)}.{match.group(3)}.{match.group(4)}"
if match.group(5):
s += f".{match.group(5)}"
return s
converted_dict: dict[str, T] = {}
for k, v in state_dict.items():
match = re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
if match:
new_key = re.sub(FLUX_KOHYA_T5_KEY_REGEX, replace_func, k)
converted_dict[new_key] = v
else:
raise ValueError(f"Key '{k}' does not match the expected pattern for FLUX LoRA weights.")
return converted_dict
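A self-contained sketch of the key conversion performed by the deleted helper, applying the regex and replace_func shown above to the docstring's first example key:

import re

FLUX_KOHYA_T5_KEY_REGEX = (
    r"lora_te2_encoder_block_(\d+)_layer_(\d+)_(DenseReluDense|SelfAttention)_(\w+)_?(\w+)?\.?.*"
)

def replace_func(match: re.Match) -> str:
    s = f"encoder.block.{match.group(1)}.layer.{match.group(2)}.{match.group(3)}.{match.group(4)}"
    if match.group(5):
        s += f".{match.group(5)}"
    return s

key = "lora_te2_encoder_block_0_layer_0_SelfAttention_k"
assert re.sub(FLUX_KOHYA_T5_KEY_REGEX, replace_func, key) == "encoder.block.0.layer.0.SelfAttention.k"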

View File

@@ -1,4 +1,3 @@
# Prefixes used to distinguish between transformer and CLIP text encoder keys in the FLUX InvokeAI LoRA format.
FLUX_LORA_TRANSFORMER_PREFIX = "lora_transformer-"
FLUX_LORA_CLIP_PREFIX = "lora_clip-"
FLUX_LORA_T5_PREFIX = "lora_t5-"

View File

@@ -1,163 +0,0 @@
import re
from typing import Any, Dict
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
lora_layers_from_flux_diffusers_grouped_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
FLUX_KOHYA_CLIP_KEY_REGEX,
FLUX_KOHYA_T5_KEY_REGEX,
_convert_flux_clip_kohya_state_dict_to_invoke_format,
_convert_flux_t5_kohya_state_dict_to_invoke_format,
)
from invokeai.backend.patches.lora_conversions.flux_lora_constants import (
FLUX_LORA_CLIP_PREFIX,
FLUX_LORA_T5_PREFIX,
)
from invokeai.backend.patches.lora_conversions.kohya_key_utils import (
INDEX_PLACEHOLDER,
ParsingTree,
insert_periods_into_kohya_key,
)
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
# A regex pattern that matches all of the transformer keys in the OneTrainer FLUX LoRA format.
# The OneTrainer format uses a mix of the Kohya and Diffusers formats:
# - The base model keys are in Diffusers format.
# - Periods are replaced with underscores, to match Kohya.
# - The LoRA key suffixes (e.g. .alpha, .lora_down.weight, .lora_up.weight) match Kohya.
# Example keys:
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.alpha"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.dora_scale"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_down.weight"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_up.weight"
FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX = (
r"lora_transformer_(single_transformer_blocks|transformer_blocks)_(\d+)_(\w+)\.(.*)"
)
def is_state_dict_likely_in_flux_onetrainer_format(state_dict: Dict[str, Any]) -> bool:
"""Checks if the provided state dict is likely in the OneTrainer FLUX LoRA format.
This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
Note that OneTrainer matches the Kohya format for the CLIP and T5 models.
"""
return all(
re.match(FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX, k)
or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
or re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
for k in state_dict.keys()
)
def lora_model_from_flux_onetrainer_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw: # type: ignore
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
for key, value in state_dict.items():
layer_name, param_name = key.split(".", 1)
if layer_name not in grouped_state_dict:
grouped_state_dict[layer_name] = {}
grouped_state_dict[layer_name][param_name] = value
# Split the grouped state dict into transformer, CLIP, and T5 state dicts.
transformer_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
clip_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
t5_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
for layer_name, layer_state_dict in grouped_state_dict.items():
if layer_name.startswith("lora_transformer"):
transformer_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te1"):
clip_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te2"):
t5_grouped_sd[layer_name] = layer_state_dict
else:
raise ValueError(f"Layer '{layer_name}' does not match the expected pattern for FLUX LoRA weights.")
# Convert the state dicts to the InvokeAI format.
clip_grouped_sd = _convert_flux_clip_kohya_state_dict_to_invoke_format(clip_grouped_sd)
t5_grouped_sd = _convert_flux_t5_kohya_state_dict_to_invoke_format(t5_grouped_sd)
# Create LoRA layers.
layers: dict[str, BaseLayerPatch] = {}
for model_prefix, grouped_sd in [
# (FLUX_LORA_TRANSFORMER_PREFIX, transformer_grouped_sd),
(FLUX_LORA_CLIP_PREFIX, clip_grouped_sd),
(FLUX_LORA_T5_PREFIX, t5_grouped_sd),
]:
for layer_key, layer_state_dict in grouped_sd.items():
layers[model_prefix + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
# Handle the transformer.
transformer_layers = _convert_flux_transformer_onetrainer_state_dict_to_invoke_format(transformer_grouped_sd)
layers.update(transformer_layers)
# Create and return the LoRAModelRaw.
return ModelPatchRaw(layers=layers)
# This parsing tree was generated by calling `generate_kohya_parsing_tree_from_keys()` on the keys in
# flux_lora_diffusers_format.py.
flux_transformer_kohya_parsing_tree: ParsingTree = {
"transformer": {
"single_transformer_blocks": {
INDEX_PLACEHOLDER: {
"attn": {"to_k": {}, "to_q": {}, "to_v": {}},
"norm": {"linear": {}},
"proj_mlp": {},
"proj_out": {},
}
},
"transformer_blocks": {
INDEX_PLACEHOLDER: {
"attn": {
"add_k_proj": {},
"add_q_proj": {},
"add_v_proj": {},
"to_add_out": {},
"to_k": {},
"to_out": {INDEX_PLACEHOLDER: {}},
"to_q": {},
"to_v": {},
},
"ff": {"net": {INDEX_PLACEHOLDER: {"proj": {}}}},
"ff_context": {"net": {INDEX_PLACEHOLDER: {"proj": {}}}},
"norm1": {"linear": {}},
"norm1_context": {"linear": {}},
}
},
}
}
def _convert_flux_transformer_onetrainer_state_dict_to_invoke_format(
state_dict: Dict[str, Dict[str, torch.Tensor]],
) -> dict[str, BaseLayerPatch]:
"""Converts a FLUX transformer LoRA state dict from the OneTrainer FLUX LoRA format to the LoRA weight format used
internally by InvokeAI.
"""
# Step 1: Convert the Kohya-style keys with underscores to classic keys with periods.
# Example:
# "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_down.weight" -> "transformer.single_transformer_blocks.0.attn.to_k.lora_down.weight"
lora_prefix = "lora_"
lora_prefix_length = len(lora_prefix)
kohya_state_dict: dict[str, Dict[str, torch.Tensor]] = {}
for key in state_dict.keys():
# Remove the "lora_" prefix.
assert key.startswith(lora_prefix)
new_key = key[lora_prefix_length:]
# Add periods to the Kohya-style module keys.
new_key = insert_periods_into_kohya_key(new_key, flux_transformer_kohya_parsing_tree)
# Replace the old key with the new key.
kohya_state_dict[new_key] = state_dict[key]
# Step 2: Convert diffusers module names to the BFL module names.
return lora_layers_from_flux_diffusers_grouped_state_dict(kohya_state_dict, alpha=None)
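A self-contained sketch checking one of the example keys listed above against the transformer regex (group numbering follows the pattern as written):

import re

FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX = (
    r"lora_transformer_(single_transformer_blocks|transformer_blocks)_(\d+)_(\w+)\.(.*)"
)

m = re.match(
    FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX,
    "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_down.weight",
)
assert m is not None
assert m.group(2) == "0"                 # block index
assert m.group(4) == "lora_down.weight"  # Kohya-style LoRA suffix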

View File

@@ -1,102 +0,0 @@
from typing import Iterable
INDEX_PLACEHOLDER = "index_placeholder"
# Type alias for a 'ParsingTree', which is a recursive dict with string keys.
ParsingTree = dict[str, "ParsingTree"]
def insert_periods_into_kohya_key(key: str, parsing_tree: ParsingTree) -> str:
"""Insert periods into a Kohya key based on a parsing tree.
Kohya format keys are produced by replacing periods with underscores in the original key.
Example:
```
key = "module_a_module_b_0_attn_to_k"
parsing_tree = {
"module_a": {
"module_b": {
INDEX_PLACEHOLDER: {
"attn": {},
},
},
},
}
result = insert_periods_into_kohya_key(key, parsing_tree)
> "module_a.module_b.0.attn.to_k"
```
"""
# Split key into parts by underscore.
parts = key.split("_")
# Build up result by walking through parsing tree and parts.
result_parts: list[str] = []
current_part = ""
current_tree = parsing_tree
for part in parts:
if len(current_part) > 0:
current_part = current_part + "_"
current_part += part
if current_part in current_tree:
# Match found.
current_tree = current_tree[current_part]
result_parts.append(current_part)
current_part = ""
elif current_part.isnumeric() and INDEX_PLACEHOLDER in current_tree:
# Match found with index placeholder.
current_tree = current_tree[INDEX_PLACEHOLDER]
result_parts.append(current_part)
current_part = ""
if len(current_part) > 0:
raise ValueError(f"Key {key} does not match parsing tree {parsing_tree}.")
return ".".join(result_parts)
def generate_kohya_parsing_tree_from_keys(keys: Iterable[str]) -> ParsingTree:
"""Generate a parsing tree from a list of keys.
Example:
```
keys = [
"module_a.module_b.0.attn.to_k",
"module_a.module_b.1.attn.to_k",
"module_a.module_c.proj",
]
tree = generate_kohya_parsing_tree_from_keys(keys)
> {
> "module_a": {
> "module_b": {
> INDEX_PLACEHOLDER: {
> "attn": {
> "to_k": {},
> "to_q": {},
> },
> }
> },
> "module_c": {
> "proj": {},
> }
> }
> }
```
"""
tree: ParsingTree = {}
for key in keys:
subtree: ParsingTree = tree
for module_name in key.split("."):
key = module_name
if module_name.isnumeric():
key = INDEX_PLACEHOLDER
if key not in subtree:
subtree[key] = {}
subtree = subtree[key]
return tree
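A round-trip sketch tying the two helpers together (keys assumed): a tree generated from dotted keys can re-insert the periods that the Kohya format replaced with underscores.

keys = ["module_a.module_b.0.attn.to_k"]
tree = generate_kohya_parsing_tree_from_keys(keys)
assert insert_periods_into_kohya_key("module_a_module_b_0_attn_to_k", tree) == (
    "module_a.module_b.0.attn.to_k"
)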

View File

@@ -54,9 +54,7 @@ GGML_TENSOR_OP_TABLE = {
torch.ops.aten.addmm.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.mul.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.add.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.sub.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.allclose.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.slice.Tensor: dequantize_and_run, # pyright: ignore
}
if torch.backends.mps.is_available():

View File

@@ -23,12 +23,6 @@ module.exports = {
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
{
object: 'navigator',
property: 'clipboard',
message:
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],
},
overrides: [

View File

@@ -11,11 +11,9 @@
<link id="invoke-favicon" rel="icon" type="icon" href="assets/images/invoke-favicon.svg" />
<style>
html,
body,
#root {
body {
padding: 0;
margin: 0;
overflow: hidden;
}
</style>
</head>
@@ -25,4 +23,4 @@
<script type="module" src="/src/main.tsx"></script>
</body>
</html>
</html>

View File

@@ -58,11 +58,10 @@
"@dagrejs/dagre": "^1.1.4",
"@dagrejs/graphlib": "^2.2.4",
"@fontsource-variable/inter": "^5.1.0",
"@invoke-ai/ui-library": "^0.0.46",
"@invoke-ai/ui-library": "^0.0.44",
"@nanostores/react": "^0.7.3",
"@reduxjs/toolkit": "2.5.1",
"@reduxjs/toolkit": "2.2.3",
"@roarr/browser-log-writer": "^1.3.0",
"@xyflow/react": "^12.4.2",
"async-mutex": "^0.5.0",
"chakra-react-select": "^4.9.2",
"cmdk": "^1.0.0",
@@ -75,8 +74,6 @@
"idb-keyval": "^6.2.1",
"jsondiffpatch": "^0.6.0",
"konva": "^9.3.15",
"linkify-react": "^4.2.0",
"linkifyjs": "^4.2.0",
"lodash-es": "^4.17.21",
"lru-cache": "^11.0.1",
"mtwist": "^1.0.2",
@@ -99,9 +96,9 @@
"react-icons": "^5.3.0",
"react-redux": "9.1.2",
"react-resizable-panels": "^2.1.4",
"react-textarea-autosize": "^8.5.7",
"react-use": "^17.5.1",
"react-virtuoso": "^4.10.4",
"reactflow": "^11.11.4",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^5.1.0",
"redux-undo": "^1.1.0",
@@ -129,7 +126,7 @@
"@storybook/addon-storysource": "^8.3.4",
"@storybook/manager-api": "^8.3.4",
"@storybook/react": "^8.3.4",
"@storybook/react-vite": "^8.5.5",
"@storybook/react-vite": "^8.3.4",
"@storybook/theming": "^8.3.4",
"@types/dateformat": "^5.0.2",
"@types/lodash-es": "^4.17.12",
@@ -137,9 +134,9 @@
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",
"@types/uuid": "^10.0.0",
"@vitejs/plugin-react-swc": "^3.8.0",
"@vitest/coverage-v8": "^3.0.6",
"@vitest/ui": "^3.0.6",
"@vitejs/plugin-react-swc": "^3.7.1",
"@vitest/coverage-v8": "^1.6.0",
"@vitest/ui": "^1.6.0",
"concurrently": "^8.2.2",
"csstype": "^3.1.3",
"dpdm": "^3.14.0",
@@ -155,12 +152,12 @@
"tsafe": "^1.7.5",
"type-fest": "^4.26.1",
"typescript": "^5.6.2",
"vite": "^6.1.0",
"vite": "^5.4.8",
"vite-plugin-css-injected-by-js": "^3.5.2",
"vite-plugin-dts": "^4.5.0",
"vite-plugin-dts": "^3.9.1",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^5.1.4",
"vitest": "^3.0.6"
"vite-tsconfig-paths": "^4.3.2",
"vitest": "^1.6.0"
},
"engines": {
"pnpm": "8"

File diff suppressed because it is too large

View File

@@ -99,21 +99,7 @@
"clipboard": "Zwischenablage",
"generating": "Generieren",
"loadingModel": "Lade Modell",
"warnings": "Warnungen",
"start": "Starten",
"count": "Anzahl",
"step": "Schritt",
"values": "Werte",
"min": "Min",
"max": "Max",
"resetToDefaults": "Auf Standard zurücksetzen",
"seed": "Seed",
"row": "Reihe",
"column": "Spalte",
"end": "Ende",
"layout": "Layout",
"board": "Ordner",
"combinatorial": "Kombinatorisch"
"warnings": "Warnungen"
},
"gallery": {
"galleryImageSize": "Bildgröße",
@@ -133,6 +119,7 @@
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt",
"starImage": "Bild markieren",
"assets": "Ressourcen",
"unstarImage": "Markierung entfernen",
"image": "Bild",
"deleteSelection": "Lösche Auswahl",
@@ -622,9 +609,7 @@
"hfTokenUnableToVerify": "HF-Token kann nicht überprüft werden",
"hfTokenUnableToVerifyErrorMessage": "HuggingFace-Token kann nicht überprüft werden. Dies ist wahrscheinlich auf einen Netzwerkfehler zurückzuführen. Bitte versuchen Sie es später erneut.",
"hfTokenSaved": "HF-Token gespeichert",
"hfTokenRequired": "Sie versuchen, ein Modell herunterzuladen, für das ein gültiges HuggingFace-Token erforderlich ist.",
"urlUnauthorizedErrorMessage2": "Hier erfahren wie.",
"urlForbidden": "Sie haben keinen Zugriff auf dieses Modell"
"hfTokenRequired": "Sie versuchen, ein Modell herunterzuladen, für das ein gültiges HuggingFace-Token erforderlich ist."
},
"parameters": {
"images": "Bilder",
@@ -691,8 +676,7 @@
"iterations": "Iterationen",
"guidance": "Führung",
"coherenceMode": "Modus",
"recallMetadata": "Metadaten abrufen",
"gaussianBlur": "Gaußsche Unschärfe"
"recallMetadata": "Metadaten abrufen"
},
"settings": {
"displayInProgress": "Zwischenbilder anzeigen",
@@ -892,8 +876,7 @@
"canvas": "Leinwand",
"prompts_one": "Prompt",
"prompts_other": "Prompts",
"batchSize": "Stapelgröße",
"confirm": "Bestätigen"
"batchSize": "Stapelgröße"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@@ -1299,22 +1282,7 @@
"unknownFieldType": "$t(nodes.unknownField) Typ: {{type}}",
"unknownField": "Unbekanntes Feld",
"unableToUpdateNodes_one": "{{count}} Knoten kann nicht aktualisiert werden",
"unableToUpdateNodes_other": "{{count}} Knoten können nicht aktualisiert werden",
"uniformRandomDistribution": "Uniforme Zufallsverteilung",
"linearDistribution": "Lineare Verteilung",
"generatorNRandomValues_one": "{{count}} Zufallswert",
"generatorNRandomValues_other": "{{count}} Zufallswerte",
"arithmeticSequence": "Arithmetische Folge",
"noBatchGroup": "keine Gruppe",
"generatorNoValues": "leer",
"generatorLoading": "wird geladen",
"generatorLoadFromFile": "Aus Datei laden",
"showEdgeLabels": "Kantenbeschriftungen anzeigen",
"downloadWorkflowError": "Fehler beim Herunterladen des Arbeitsablaufs",
"nodeName": "Knotenname",
"description": "Beschreibung",
"loadWorkflowDesc": "Arbeitsablauf laden?",
"loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen."
"unableToUpdateNodes_other": "{{count}} Knoten können nicht aktualisiert werden"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",

View File

@@ -90,7 +90,6 @@
"back": "Back",
"batch": "Batch Manager",
"beta": "Beta",
"board": "Board",
"cancel": "Cancel",
"close": "Close",
"copy": "Copy",
@@ -188,10 +187,7 @@
"values": "Values",
"resetToDefaults": "Reset to Defaults",
"seed": "Seed",
"combinatorial": "Combinatorial",
"layout": "Layout",
"row": "Row",
"column": "Column"
"combinatorial": "Combinatorial"
},
"hrf": {
"hrf": "High Resolution Fix",
@@ -223,15 +219,9 @@
"pauseSucceeded": "Processor Paused",
"pauseFailed": "Problem Pausing Processor",
"cancel": "Cancel",
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
"cancelTooltip": "Cancel Current Item",
"cancelSucceeded": "Item Canceled",
"cancelFailed": "Problem Canceling Item",
"retrySucceeded": "Item Retried",
"retryFailed": "Problem Retrying Item",
"confirm": "Confirm",
"prune": "Prune",
"pruneTooltip": "Prune {{item_count}} Completed Items",
"pruneSucceeded": "Pruned {{item_count}} Completed Items from Queue",
@@ -242,7 +232,6 @@
"clearFailed": "Problem Clearing Queue",
"cancelBatch": "Cancel Batch",
"cancelItem": "Cancel Item",
"retryItem": "Retry Item",
"cancelBatchSucceeded": "Batch Canceled",
"cancelBatchFailed": "Problem Canceling Batch",
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
@@ -305,16 +294,10 @@
"disableFailed": "Problem Disabling Invocation Cache",
"useCache": "Use Cache"
},
"modelCache": {
"clear": "Clear Model Cache",
"clearSucceeded": "Model Cache Cleared",
"clearFailed": "Problem Clearing Model Cache"
},
"gallery": {
"gallery": "Gallery",
"images": "Images",
"assets": "Assets",
"alwaysShowImageSizeBadge": "Always Show Image Size Badge",
"assets": "Assets",
"assetsTab": "Files youve uploaded for use in your projects.",
"autoAssignBoardOnClick": "Auto-Assign Board on Click",
"autoSwitchNewImages": "Auto-Switch to New Images",
@@ -883,15 +866,11 @@
"parseString": "Parse String",
"splitOn": "Split On",
"noBatchGroup": "no group",
"generatorImagesCategory": "Category",
"generatorImages_one": "{{count}} image",
"generatorImages_other": "{{count}} images",
"generatorNRandomValues_one": "{{count}} random value",
"generatorNRandomValues_other": "{{count}} random values",
"generatorNoValues": "empty",
"generatorLoading": "loading",
"generatorLoadFromFile": "Load from File",
"generatorImagesFromBoard": "Images from Board",
"dynamicPromptsRandom": "Dynamic Prompts (Random)",
"dynamicPromptsCombinatorial": "Dynamic Prompts (Combinatorial)",
"addNode": "Add Node",
@@ -908,8 +887,6 @@
"missingNode": "Missing invocation node",
"missingInvocationTemplate": "Missing invocation template",
"missingFieldTemplate": "Missing field template",
"missingSourceOrTargetNode": "Missing source or target node",
"missingSourceOrTargetHandle": "Missing source or target handle",
"nodePack": "Node pack",
"collection": "Collection",
"singleFieldType": "{{name}} (Single)",
@@ -921,7 +898,6 @@
"currentImage": "Current Image",
"currentImageDescription": "Displays the current image in the Node Editor",
"downloadWorkflow": "Download Workflow JSON",
"downloadWorkflowError": "Error downloading workflow",
"edge": "Edge",
"edit": "Edit",
"editMode": "Edit in Workflow Editor",
@@ -946,7 +922,6 @@
"noWorkflows": "No Workflows",
"noMatchingWorkflows": "No Matching Workflows",
"noWorkflow": "No Workflow",
"unableToUpdateNode": "Node update failed: node {{node}} of type {{type}} (may require deleting and recreating)",
"mismatchedVersion": "Invalid node: node {{node}} of type {{type}} has mismatched version (try updating?)",
"missingTemplate": "Invalid node: node {{node}} of type {{type}} missing template (not installed?)",
"sourceNodeDoesNotExist": "Invalid edge: source/output node {{node}} does not exist",
@@ -954,14 +929,12 @@
"sourceNodeFieldDoesNotExist": "Invalid edge: source/output field {{node}}.{{field}} does not exist",
"targetNodeFieldDoesNotExist": "Invalid edge: target/input field {{node}}.{{field}} does not exist",
"deletedInvalidEdge": "Deleted invalid edge {{source}} -> {{target}}",
"deletedMissingNodeFieldFormElement": "Deleted missing form field: node {{nodeId}} field {{fieldName}}",
"noConnectionInProgress": "No connection in progress",
"node": "Node",
"nodeOutputs": "Node Outputs",
"nodeSearch": "Search for nodes",
"nodeTemplate": "Node Template",
"nodeType": "Node Type",
"nodeName": "Node Name",
"noFieldsLinearview": "No fields added to Linear View",
"noFieldsViewMode": "This workflow has no selected fields to display. View the full workflow to configure values.",
"workflowHelpText": "Need Help? Check out our guide to <LinkComponent>Getting Started with Workflows</LinkComponent>.",
@@ -970,7 +943,6 @@
"nodeVersion": "Node Version",
"noOutputRecorded": "No outputs recorded",
"notes": "Notes",
"description": "Description",
"notesDescription": "Add notes about your workflow",
"problemSettingTitle": "Problem Setting Title",
"resetToDefaultValue": "Reset to default value",
@@ -980,8 +952,6 @@
"newWorkflow": "New Workflow",
"newWorkflowDesc": "Create a new workflow?",
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
"loadWorkflowDesc": "Load workflow?",
"loadWorkflowDesc2": "Your current workflow has unsaved changes.",
"clearWorkflow": "Clear Workflow",
"clearWorkflowDesc": "Clear this workflow and start a new one?",
"clearWorkflowDesc2": "Your current workflow has unsaved changes.",
@@ -1011,7 +981,6 @@
"unknownOutput": "Unknown output: {{name}}",
"updateNode": "Update Node",
"updateApp": "Update App",
"loadingTemplates": "Loading {{name}}",
"updateAllNodes": "Update Nodes",
"allNodesUpdated": "All Nodes Updated",
"unableToUpdateNodes_one": "Unable to update {{count}} node",
@@ -1087,7 +1056,7 @@
"emptyBatches": "empty batches",
"batchNodeNotConnected": "Batch node not connected: {{label}}",
"batchNodeEmptyCollection": "Some batch nodes have empty collections",
"collectionEmpty": "empty collection",
"invalidBatchConfigurationCannotCalculate": "Invalid batch configuration; cannot calculate",
"collectionTooFewItems": "too few items, minimum {{minItems}}",
"collectionTooManyItems": "too many items, maximum {{maxItems}}",
"collectionStringTooLong": "too long, max {{maxLength}}",
@@ -1097,7 +1066,6 @@
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (exc max)",
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (exc min)",
"collectionNumberNotMultipleOf": "{{value}} not multiple of {{multipleOf}}",
"batchNodeCollectionSizeMismatchNoGroupId": "Batch group collection size mismatch",
"batchNodeCollectionSizeMismatch": "Collection size mismatch on Batch {{batchGroupId}}",
"noModelSelected": "No model selected",
"noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
@@ -1271,8 +1239,6 @@
"problemCopyingLayer": "Unable to Copy Layer",
"problemSavingLayer": "Unable to Save Layer",
"problemDownloadingImage": "Unable to Download Image",
"pasteSuccess": "Pasted to {{destination}}",
"pasteFailed": "Paste Failed",
"prunedQueue": "Pruned Queue",
"sentToCanvas": "Sent to Canvas",
"sentToUpscale": "Sent to Upscale",
@@ -1289,10 +1255,7 @@
"workflowLoaded": "Workflow Loaded",
"problemRetrievingWorkflow": "Problem Retrieving Workflow",
"workflowDeleted": "Workflow Deleted",
"problemDeletingWorkflow": "Problem Deleting Workflow",
"unableToCopy": "Unable to Copy",
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
"unableToCopyDesc_theseSteps": "these steps"
"problemDeletingWorkflow": "Problem Deleting Workflow"
},
"popovers": {
"clipSkip": {
@@ -1717,38 +1680,7 @@
"download": "Download",
"copyShareLink": "Copy Share Link",
"copyShareLinkForWorkflow": "Copy Share Link for Workflow",
"delete": "Delete",
"openLibrary": "Open Library",
"builder": {
"deleteAllElements": "Delete All Form Elements",
"resetAllNodeFields": "Reset All Node Fields",
"builder": "Form Builder",
"layout": "Layout",
"row": "Row",
"column": "Column",
"container": "Container",
"heading": "Heading",
"text": "Text",
"divider": "Divider",
"nodeField": "Node Field",
"zoomToNode": "Zoom to Node",
"nodeFieldTooltip": "To add a node field, click the small plus sign button on the field in the Workflow Editor, or drag the field by its name into the form.",
"addToForm": "Add to Form",
"label": "Label",
"showDescription": "Show Description",
"component": "Component",
"numberInput": "Number Input",
"singleLine": "Single Line",
"multiLine": "Multi Line",
"slider": "Slider",
"both": "Both",
"emptyRootPlaceholderViewMode": "Click Edit to start building a form for this workflow.",
"emptyRootPlaceholderEditMode": "Drag a form element or node field here to get started.",
"containerPlaceholder": "Empty Container",
"headingPlaceholder": "Empty Heading",
"textPlaceholder": "Empty Text",
"workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release."
}
"delete": "Delete"
},
"controlLayers": {
"regional": "Regional",
@@ -1763,8 +1695,6 @@
"cropLayerToBbox": "Crop Layer to Bbox",
"savedToGalleryOk": "Saved to Gallery",
"savedToGalleryError": "Error saving to gallery",
"regionCopiedToClipboard": "{{region}} Copied to Clipboard",
"copyRegionError": "Error copying {{region}}",
"newGlobalReferenceImageOk": "Created Global Reference Image",
"newGlobalReferenceImageError": "Problem Creating Global Reference Image",
"newRegionalReferenceImageOk": "Created Regional Reference Image",
@@ -1875,14 +1805,6 @@
"newControlLayer": "New $t(controlLayers.controlLayer)",
"newInpaintMask": "New $t(controlLayers.inpaintMask)",
"newRegionalGuidance": "New $t(controlLayers.regionalGuidance)",
"pasteTo": "Paste To",
"pasteToAssets": "Assets",
"pasteToAssetsDesc": "Paste to Assets",
"pasteToBbox": "Bbox",
"pasteToBboxDesc": "New Layer (in Bbox)",
"pasteToCanvas": "Canvas",
"pasteToCanvasDesc": "New Layer (in Canvas)",
"pastedTo": "Pasted to {{destination}}",
"transparency": "Transparency",
"enableTransparencyEffect": "Enable Transparency Effect",
"disableTransparencyEffect": "Disable Transparency Effect",
@@ -1911,7 +1833,7 @@
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
"warnings": {
"problemsFound": "Problems found",
"unsupportedModel": "layer not supported for selected base model",
@@ -1927,10 +1849,6 @@
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
"rgNoRegion": "no region drawn"
},
"errors": {
"unableToFindImage": "Unable to find image",
"unableToLoadImage": "Unable to Load Image"
},
"controlMode": {
"controlMode": "Control Mode",
"balanced": "Balanced (recommended)",
@@ -2071,30 +1989,6 @@
"salt_and_pepper_type": "Salt and Pepper",
"noise_color": "Colored Noise",
"size": "Noise Size"
},
"adjust_image": {
"label": "Adjust Image",
"description": "Adjusts the selected channel of an image.",
"channel": "Channel",
"value_setting": "Value",
"scale_values": "Scale Values",
"red": "Red (RGBA)",
"green": "Green (RGBA)",
"blue": "Blue (RGBA)",
"alpha": "Alpha (RGBA)",
"cyan": "Cyan (CMYK)",
"magenta": "Magenta (CMYK)",
"yellow": "Yellow (CMYK)",
"black": "Black (CMYK)",
"hue": "Hue (HSV)",
"saturation": "Saturation (HSV)",
"value": "Value (HSV)",
"luminosity": "Luminosity (LAB)",
"a": "A (LAB)",
"b": "B (LAB)",
"y": "Y (YCbCr)",
"cb": "Cb (YCbCr)",
"cr": "Cr (YCbCr)"
}
},
"transform": {
@@ -2168,10 +2062,7 @@
"newRasterLayer": "New Raster Layer",
"newInpaintMask": "New Inpaint Mask",
"newRegionalGuidance": "New Regional Guidance",
"cropCanvasToBbox": "Crop Canvas to Bbox",
"copyToClipboard": "Copy to Clipboard",
"copyCanvasToClipboard": "Copy Canvas to Clipboard",
"copyBboxToClipboard": "Copy Bbox to Clipboard"
"cropCanvasToBbox": "Crop Canvas to Bbox"
},
"stagingArea": {
"accept": "Accept",
@@ -2305,10 +2196,7 @@
},
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Memory Management: New setting for users with Nvidia GPUs to reduce VRAM usage.",
"Performance: Continued improvements to overall application performance and responsiveness."
],
"items": ["Low-VRAM mode", "Dynamic memory management", "Faster model loading times", "Fewer memory errors"],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
"watchUiUpdatesOverview": "Watch UI Updates Overview"

View File

@@ -109,6 +109,7 @@
"deleteImage_many": "Eliminar {{count}} Imágenes",
"deleteImage_other": "Eliminar {{count}} Imágenes",
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
"assets": "Activos",
"autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
"gallery": "Galería",
"noImageSelected": "Sin imágenes seleccionadas",
@@ -900,7 +901,9 @@
}
},
"newUserExperience": {
"downloadStarterModels": "Descargar modelos de inicio",
"toGetStarted": "Para empezar, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en <StrongComponent>Lienzo</StrongComponent>.",
"importModels": "Importar modelos",
"noModelsInstalled": "Parece que no tienes ningún modelo instalado",
"gettingStartedSeries": "¿Desea más orientación? Consulte nuestra <LinkComponent>Serie de introducción</LinkComponent> para obtener consejos sobre cómo aprovechar todo el potencial de Invoke Studio.",
"toGetStartedLocal": "Para empezar, asegúrate de descargar o importar los modelos necesarios para ejecutar Invoke. A continuación, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en el <StrongComponent>Lienzo</StrongComponent>."

View File

@@ -98,22 +98,7 @@
"close": "Fermer",
"clipboard": "Presse-papier",
"loadingModel": "Chargement du modèle",
"generating": "En Génération",
"warnings": "Alertes",
"layout": "Disposition",
"row": "Ligne",
"column": "Colonne",
"start": "Commencer",
"board": "Planche",
"count": "Quantité",
"step": "Étape",
"end": "Fin",
"min": "Min",
"max": "Max",
"values": "Valeurs",
"resetToDefaults": "Réinitialiser par défaut",
"seed": "Graine",
"combinatorial": "Combinatoire"
"generating": "En Génération"
},
"gallery": {
"galleryImageSize": "Taille de l'image",
@@ -129,6 +114,7 @@
"sortDirection": "Direction de tri",
"sideBySide": "Côte-à-Côte",
"hover": "Au passage de la souris",
"assets": "Ressources",
"alwaysShowImageSizeBadge": "Toujours montrer le badge de taille de l'Image",
"gallery": "Galerie",
"bulkDownloadRequestFailed": "Problème lors de la préparation du téléchargement",
@@ -180,9 +166,7 @@
"imagesSettings": "Paramètres des images de la galerie",
"assetsTab": "Fichiers que vous avez importés pour vos projets.",
"imagesTab": "Images que vous avez créées et enregistrées dans Invoke.",
"boardsSettings": "Paramètres des planches",
"assets": "Ressources",
"images": "Images"
"boardsSettings": "Paramètres des planches"
},
"modelManager": {
"modelManager": "Gestionnaire de modèle",
@@ -306,7 +290,7 @@
"usingDefaultSettings": "Utilisation des paramètres par défaut du modèle",
"defaultSettingsOutOfSync": "Certain paramètres ne correspondent pas aux valeurs par défaut du modèle :",
"restoreDefaultSettings": "Cliquez pour utiliser les paramètres par défaut du modèle.",
"hfForbiddenErrorMessage": "Nous vous recommandons de visiter la page du modèle. Le propriétaire peut exiger l'acceptation des conditions pour pouvoir télécharger.",
"hfForbiddenErrorMessage": "Nous vous recommandons de visiter la page du modèle sur HuggingFace.com. Le propriétaire peut exiger l'acceptation des conditions pour pouvoir télécharger.",
"hfTokenRequired": "Vous essayez de télécharger un modèle qui nécessite un token HuggingFace valide.",
"clipLEmbed": "CLIP-L Embed",
"hfTokenSaved": "Token HF enregistré",
@@ -318,12 +302,7 @@
"hfTokenHelperText": "Un token HF est requis pour utiliser certains modèles. Cliquez ici pour créer ou obtenir votre token.",
"hfTokenInvalid": "Token HF invalide ou manquant",
"hfForbidden": "Vous n'avez pas accès à ce modèle HF.",
"hfTokenInvalidErrorMessage2": "Mettre à jour dans le ",
"controlLora": "Controle LoRA",
"urlUnauthorizedErrorMessage2": "Découvrir comment ici.",
"urlUnauthorizedErrorMessage": "Vous devrez peut-être configurer un jeton API pour accéder à ce modèle.",
"urlForbidden": "Vous n'avez pas accès à ce modèle",
"urlForbiddenErrorMessage": "Vous devrez peut-être demander l'autorisation du site qui distribue le modèle."
"hfTokenInvalidErrorMessage2": "Mettre à jour dans le "
},
"parameters": {
"images": "Images",
@@ -354,7 +333,7 @@
"showOptionsPanel": "Afficher le panneau latéral (O ou T)",
"invoke": {
"noPrompts": "Aucun prompts généré",
"missingInputForField": "entrée manquante",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} entrée manquante",
"missingFieldTemplate": "Modèle de champ manquant",
"invoke": "Invoke",
"addingImagesTo": "Ajouter des images à",
@@ -365,31 +344,18 @@
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
"noFLUXVAEModelSelected": "Aucun modèle VAE sélectionné pour la génération FLUX",
"canvasIsTransforming": "La Toile est occupée (en transformation)",
"canvasIsRasterizing": "La Toile est occupée (en rastérisation)",
"canvasIsTransforming": "La Toile se transforme",
"canvasIsRasterizing": "La Toile se rastérise",
"noCLIPEmbedModelSelected": "Aucun modèle CLIP Embed sélectionné pour la génération FLUX",
"canvasIsFiltering": "La Toile est occupée (en filtration)",
"canvasIsFiltering": "La Toile est en train de filtrer",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box est {{width}}",
"noT5EncoderModelSelected": "Aucun modèle T5 Encoder sélectionné pour la génération FLUX",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box mise à l'échelle est {{width}}",
"canvasIsCompositing": "La Toile est occupée (en composition)",
"collectionTooFewItems": "trop peu d'éléments, minimum {{minItems}}",
"collectionTooManyItems": "trop d'éléments, maximum {{maxItems}}",
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)",
"emptyBatches": "lots vides",
"batchNodeNotConnected": "Noeud de lots non connecté : {{label}}",
"fluxModelMultipleControlLoRAs": "Vous ne pouvez utiliser qu'un seul Control LoRA à la fois",
"collectionNumberLTMin": "{{value}} < {{minimum}} (incl. min)",
"collectionNumberGTMax": "{{value}} > {{maximum}} (incl. max)",
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (max exc)",
"batchNodeEmptyCollection": "Certains nœuds de lot ont des collections vides",
"batchNodeCollectionSizeMismatch": "Non-concordance de taille de collection sur le lot {{batchGroupId}}",
"collectionStringTooLong": "trop long, max {{maxLength}}",
"collectionNumberNotMultipleOf": "{{value}} n'est pas un multiple de {{multipleOf}}",
"collectionEmpty": "collection vide",
"collectionStringTooShort": "trop court, min {{minLength}}",
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (min exc)",
"batchNodeCollectionSizeMismatchNoGroupId": "Taille de collection de groupe par lot non conforme"
"canvasIsCompositing": "La toile est en train de composer",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} collection vide",
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}} : trop peu d'éléments, minimum {{minItems}}",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}} : trop d'éléments, maximum {{maxItems}}",
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)"
},
"negativePromptPlaceholder": "Prompt Négatif",
"positivePromptPlaceholder": "Prompt Positif",
@@ -533,13 +499,7 @@
"uploadFailedInvalidUploadDesc_withCount_one": "Doit être au maximum une image PNG ou JPEG.",
"uploadFailedInvalidUploadDesc_withCount_many": "Doit être au maximum {{count}} images PNG ou JPEG.",
"uploadFailedInvalidUploadDesc_withCount_other": "Doit être au maximum {{count}} images PNG ou JPEG.",
"addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)",
"pasteSuccess": "Collé à {{destination}}",
"pasteFailed": "Échec du collage",
"outOfMemoryErrorDescLocal": "Suivez notre <LinkComponent>guide Low VRAM</LinkComponent> pour réduire les OOMs.",
"unableToCopy": "Incapable de Copier",
"unableToCopyDesc": "Votre navigateur ne prend pas en charge l'accès au presse-papiers. Les utilisateurs de Firefox peuvent peut-être résoudre ce problème en suivant ",
"unableToCopyDesc_theseSteps": "ces étapes"
"addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)"
},
"accessibility": {
"uploadImage": "Importer une image",
@@ -697,14 +657,7 @@
"iterations_many": "Itérations",
"iterations_other": "Itérations",
"back": "fin",
"batchSize": "Taille de lot",
"retryFailed": "Problème de nouvelle tentative de l'élément",
"retrySucceeded": "Élément Retenté",
"retryItem": "Réessayer l'élement",
"cancelAllExceptCurrentQueueItemAlertDialog": "Annuler tous les éléments de la file d'attente, sauf celui en cours, arrêtera les éléments en attente mais permettra à celui en cours de se terminer.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Êtes-vous sûr de vouloir annuler tous les éléments en attente dans la file d'attente?",
"cancelAllExceptCurrentTooltip": "Annuler tout sauf l'élément actuel",
"confirm": "Confirmer"
"batchSize": "Taille de lot"
},
"prompt": {
"noMatchingTriggers": "Pas de déclancheurs correspondants",
@@ -1076,9 +1029,7 @@
"controlNetWeight": {
"heading": "Poids",
"paragraphs": [
"Poids du Control Adapter. Un poids plus élevé aura un impact plus important sur l'image finale.",
"• Poids plus élevé (.75-2) : Crée un impact plus significatif sur le résultat final.",
"• Poids inférieur (0-.75) : Crée un impact plus faible sur le résultat final."
"Poids du Control Adapter. Un poids plus élevé aura un impact plus important sur l'image finale."
]
},
"compositingMaskAdjustments": {
@@ -1123,9 +1074,8 @@
"controlNetBeginEnd": {
"heading": "Pourcentage de début / de fin d'étape",
"paragraphs": [
"Ce paramètre détérmine quelle portion du processus de débruitage (génération) utilisera cette couche comme guide.",
"En général, les Control Adapter appliqués au début du processus guident la composition, tandis que les Control Adapter appliqués à la fin guident les détails.",
"• Étape de fin (%): Spécifie quand arrêter d'appliquer le guide de cette couche et revenir aux guides généraux du modèle et aux autres paramètres."
"La partie du processus de débruitage à laquelle le Control Adapter sera appliqué.",
"En général, les Control Adapter appliqués au début du processus guident la composition, tandis que les Control Adapter appliqués à la fin guident les détails."
]
},
"controlNetControlMode": {
@@ -1490,8 +1440,7 @@
"showDynamicPrompts": "Afficher les Prompts dynamiques",
"dynamicPrompts": "Prompts Dynamiques",
"promptsPreview": "Prévisualisation des Prompts",
"loading": "Génération des Pompts Dynamiques...",
"promptsToGenerate": "Prompts à générer"
"loading": "Génération des Pompts Dynamiques..."
},
"metadata": {
"positivePrompt": "Prompt Positif",
@@ -1683,41 +1632,7 @@
"boardAccessError": "Impossible de trouver la planche {{board_id}}, réinitialisation à la valeur par défaut",
"workflowHelpText": "Besoin d'aide? Consultez notre guide sur <LinkComponent>Comment commencer avec les Workflows</LinkComponent>.",
"noWorkflows": "Aucun Workflows",
"noMatchingWorkflows": "Aucun Workflows correspondant",
"arithmeticSequence": "Séquence Arithmétique",
"uniformRandomDistribution": "Distribution Aléatoire Uniforme",
"noBatchGroup": "aucun groupe",
"generatorLoading": "chargement",
"generatorLoadFromFile": "Charger depuis un Fichier",
"dynamicPromptsRandom": "Prompts Dynamiques (Aléatoire)",
"integerRangeGenerator": "Générateur d'interval d'entiers",
"generateValues": "Générer Valeurs",
"linearDistribution": "Distribution Linéaire",
"floatRangeGenerator": "Générateur d'interval de nombres décimaux",
"generatorNRandomValues_one": "{{count}} valeur aléatoire",
"generatorNRandomValues_many": "{{count}} valeurs aléatoires",
"generatorNRandomValues_other": "{{count}} valeurs aléatoires",
"dynamicPromptsCombinatorial": "Prompts Dynamiques (Combinatoire)",
"parseString": "Analyser la chaine de charactères",
"internalDesc": "Cette invocation est utilisée internalement par Invoke. En fonction des mises à jours il est possible que des changements y soit effectués ou qu'elle soit supprimé sans prévention.",
"splitOn": "Diviser sur",
"generatorNoValues": "vide",
"addItem": "Ajouter un élément",
"specialDesc": "Cette invocation nécessite un traitement spécial dans l'application. Par exemple, les nœuds Batch sont utilisés pour mettre en file d'attente plusieurs graphes à partir d'un seul workflow.",
"unableToUpdateNode": "La mise à jour du nœud a échoué : nœud {{node}} de type {{type}} (peut nécessiter la suppression et la recréation).",
"deletedMissingNodeFieldFormElement": "Champ de formulaire manquant supprimé : nœud {{nodeId}} champ {{fieldName}}",
"nodeName": "Nom du nœud",
"description": "Description",
"loadWorkflowDesc": "Charger le workflow?",
"missingSourceOrTargetNode": "Nœud source ou cible manquant",
"generatorImagesCategory": "Catégorie",
"generatorImagesFromBoard": "Images de la Planche",
"missingSourceOrTargetHandle": "Manque de gestionnaire source ou cible",
"loadingTemplates": "Chargement de {{name}}",
"loadWorkflowDesc2": "Votre workflow actuel contient des modifications non enregistrées.",
"generatorImages_one": "{{count}} image",
"generatorImages_many": "{{count}} images",
"generatorImages_other": "{{count}} images"
"noMatchingWorkflows": "Aucun Workflows correspondant"
},
"models": {
"noMatchingModels": "Aucun modèle correspondant",
@@ -1776,41 +1691,13 @@
"deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce Workflow? Cette action ne peut pas être annulé.",
"download": "Télécharger",
"copyShareLinkForWorkflow": "Copier le lien de partage pour le Workflow",
"delete": "Supprimer",
"builder": {
"component": "Composant",
"numberInput": "Entrée de nombre",
"slider": "Curseur",
"both": "Les deux",
"singleLine": "Ligne unique",
"multiLine": "Multi Ligne",
"headingPlaceholder": "En-tête vide",
"emptyRootPlaceholderEditMode": "Faites glisser un élément de formulaire ou un champ de nœud ici pour commencer.",
"emptyRootPlaceholderViewMode": "Cliquez sur Modifier pour commencer à créer un formulaire pour ce workflow.",
"containerPlaceholder": "Conteneur Vide",
"row": "Ligne",
"column": "Colonne",
"layout": "Mise en page",
"nodeField": "Champ de nœud",
"zoomToNode": "Zoomer sur le nœud",
"nodeFieldTooltip": "Pour ajouter un champ de nœud, cliquez sur le petit bouton plus sur le champ dans l'Éditeur de Workflow, ou faites glisser le champ par son nom dans le formulaire.",
"addToForm": "Ajouter au formulaire",
"label": "Étiquette",
"textPlaceholder": "Texte vide",
"builder": "Constructeur de Formulaire",
"resetAllNodeFields": "Réinitialiser tous les champs de nœud",
"deleteAllElements": "Supprimer tous les éléments de formulaire",
"workflowBuilderAlphaWarning": "Le constructeur de workflow est actuellement en version alpha. Il peut y avoir des changements majeurs avant la version stable.",
"showDescription": "Afficher la description"
},
"openLibrary": "Ouvrir la Bibliothèque"
"delete": "Supprimer"
},
"whatsNew": {
"whatsNewInInvoke": "Quoi de neuf dans Invoke",
"watchRecentReleaseVideos": "Regarder les vidéos des dernières versions",
"items": [
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux.",
"Autres améliorations : mise en file d'attente par lots plus rapide, meilleur redimensionnement, sélecteur de couleurs amélioré et nœuds de métadonnées."
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux."
],
"readReleaseNotes": "Notes de version",
"watchUiUpdatesOverview": "Aperçu des mises à jour de l'interface utilisateur"
@@ -1924,49 +1811,7 @@
"cancel": "Annuler",
"advanced": "Avancé",
"processingLayerWith": "Calque de traitement avec le filtre {{type}}.",
"forMoreControl": "Pour plus de contrôle, cliquez sur Avancé ci-dessous.",
"adjust_image": {
"b": "B (LAB)",
"blue": "Bleu (RGBA)",
"alpha": "Alpha (RGBA)",
"magenta": "Magenta (CMJN)",
"yellow": "Jaune (CMJN)",
"cb": "Cb (YCbCr)",
"cr": "Cr (YCbCr)",
"cyan": "Cyan (CMJN)",
"label": "Ajuster l'image",
"description": "Ajuste le canal sélectionné d'une image.",
"channel": "Canal",
"value_setting": "Valeur",
"scale_values": "Valeurs d'échelle",
"red": "Rouge (RGBA)",
"green": "Vert (RGBA)",
"black": "Noir (CMJN)",
"hue": "Teinte (HSV)",
"saturation": "Saturation (HSV)",
"value": "Valeur (HSV)",
"luminosity": "Luminosité (LAB)",
"a": "A (LAB)",
"y": "Y (YCbCr)"
},
"img_blur": {
"label": "Flou de l'image",
"blur_type": "Type de flou",
"box_type": "Boîte",
"description": "Floute la couche sélectionnée.",
"blur_radius": "Rayon",
"gaussian_type": "Gaussien"
},
"img_noise": {
"label": "Image de bruit",
"description": "Ajoute du bruit à la couche sélectionnée.",
"gaussian_type": "Gaussien",
"size": "Taille du bruit",
"noise_amount": "Quantité",
"noise_type": "Type de bruit",
"salt_and_pepper_type": "Sel et Poivre",
"noise_color": "Bruit coloré"
}
"forMoreControl": "Pour plus de contrôle, cliquez sur Avancé ci-dessous."
},
"canvasContextMenu": {
"saveToGalleryGroup": "Enregistrer dans la galerie",
@@ -1980,10 +1825,7 @@
"newGlobalReferenceImage": "Nouvelle image de référence globale",
"newControlLayer": "Nouveau couche de contrôle",
"newInpaintMask": "Nouveau Masque Inpaint",
"newRegionalGuidance": "Nouveau Guide Régional",
"copyToClipboard": "Copier dans le presse-papiers",
"copyBboxToClipboard": "Copier Bbox dans le presse-papiers",
"copyCanvasToClipboard": "Copier la Toile dans le presse-papiers"
"newRegionalGuidance": "Nouveau Guide Régional"
},
"bookmark": "Marque-page pour Changement Rapide",
"saveLayerToAssets": "Enregistrer la couche dans les ressources",
@@ -2149,10 +1991,7 @@
"ipAdapterMethod": "Méthode d'IP Adapter",
"full": "Complet",
"style": "Style uniquement",
"composition": "Composition uniquement",
"fullDesc": "Applique le style visuel (couleurs, textures) et la composition (mise en page, structure).",
"styleDesc": "Applique un style visuel (couleurs, textures) sans tenir compte de sa mise en page.",
"compositionDesc": "Réplique la mise en page et la structure tout en ignorant le style de la référence."
"composition": "Composition uniquement"
},
"fitBboxToLayers": "Ajuster la bounding box aux calques",
"regionIsEmpty": "La zone sélectionnée est vide",
@@ -2235,40 +2074,7 @@
"asRasterLayerResize": "En tant que $t(controlLayers.rasterLayer) (Redimensionner)",
"asControlLayer": "En tant que $t(controlLayers.controlLayer)",
"asControlLayerResize": "En $t(controlLayers.controlLayer) (Redimensionner)",
"newSession": "Nouvelle session",
"warnings": {
"controlAdapterIncompatibleBaseModel": "modèle de base de la couche de contrôle incompatible",
"controlAdapterNoControl": "aucun contrôle sélectionné/dessiné",
"rgNoPromptsOrIPAdapters": "pas de textes d'instructions ni d'images de référence",
"rgAutoNegativeNotSupported": "Auto-négatif non pris en charge pour le modèle de base sélectionné",
"rgNoRegion": "aucune région dessinée",
"ipAdapterNoModelSelected": "aucun modèle d'image de référence sélectionné",
"rgReferenceImagesNotSupported": "Les images de référence régionales ne sont pas prises en charge pour le modèle de base sélectionné",
"problemsFound": "Problèmes trouvés",
"unsupportedModel": "couche non prise en charge pour le modèle de base sélectionné",
"rgNegativePromptNotSupported": "Prompt négatif non pris en charge pour le modèle de base sélectionné",
"ipAdapterIncompatibleBaseModel": "modèle de base d'image de référence incompatible",
"controlAdapterNoModelSelected": "aucun modèle de couche de contrôle sélectionné",
"ipAdapterNoImageSelected": "Aucune image de référence sélectionnée."
},
"pasteTo": "Coller vers",
"pasteToAssets": "Ressources",
"pasteToAssetsDesc": "Coller dans les ressources",
"pasteToBbox": "Bbox",
"regionCopiedToClipboard": "{{region}} Copié dans le presse-papiers",
"copyRegionError": "Erreur de copie {{region}}",
"pasteToCanvas": "Toile",
"errors": {
"unableToFindImage": "Impossible de trouver l'image",
"unableToLoadImage": "Impossible de charger l'image"
},
"referenceImageRegional": "Image de référence (régionale)",
"pasteToBboxDesc": "Nouvelle couche (dans Bbox)",
"pasteToCanvasDesc": "Nouvelle couche (dans la Toile)",
"useImage": "Utiliser l'image",
"pastedTo": "Collé à {{destination}}",
"referenceImageEmptyState": "<UploadButton>Séléctionner une image</UploadButton> ou faites glisser une image depuis la <GalleryButton>galerie</GalleryButton> sur cette couche pour commencer.",
"referenceImageGlobal": "Image de référence (Globale)"
"newSession": "Nouvelle session"
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
@@ -2348,8 +2154,7 @@
"queue": "File d'attente",
"events": "Événements",
"metadata": "Métadonnées",
"gallery": "Galerie",
"dnd": "Glisser et déposer"
"gallery": "Galerie"
},
"logLevel": {
"trace": "Trace",
@@ -2366,8 +2171,9 @@
"toGetStarted": "Pour commencer, saisissez un prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement dans la <StrongComponent>Galerie</StrongComponent> ou de les modifier sur la <StrongComponent>Toile</StrongComponent>.",
"gettingStartedSeries": "Vous souhaitez plus de conseils? Consultez notre <LinkComponent>Série de démarrage</LinkComponent> pour des astuces sur l'exploitation du plein potentiel de l'Invoke Studio.",
"noModelsInstalled": "Il semble qu'aucun modèle ne soit installé",
"toGetStartedLocal": "Pour commencer, assurez-vous de télécharger ou d'importer des modèles nécessaires pour exécuter Invoke. Ensuite, saisissez le prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement sur <StrongComponent>Galerie</StrongComponent> ou les modifier sur la <StrongComponent>Toile</StrongComponent>.",
"lowVRAMMode": "Pour de meilleures performances, suivez notre <LinkComponent>guide Low VRAM</LinkComponent>."
"downloadStarterModels": "Télécharger les modèles de démarrage",
"importModels": "Importer des Modèles",
"toGetStartedLocal": "Pour commencer, assurez-vous de télécharger ou d'importer des modèles nécessaires pour exécuter Invoke. Ensuite, saisissez le prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement sur <StrongComponent>Galerie</StrongComponent> ou les modifier sur la <StrongComponent>Toile</StrongComponent>."
},
"upsell": {
"shareAccess": "Partager l'accès",
@@ -2415,8 +2221,7 @@
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
},
"howDoIUseInpaintMasks": {
"title": "Comment utiliser les masques d'inpainting?",
"description": "Comment appliquer des masques de retourche pour la correction et la variation d'image."
"title": "Comment utiliser les masques d'inpainting?"
},
"creatingYourFirstImage": {
"title": "Créer votre première image",
@@ -2425,10 +2230,6 @@
"understandingImageToImageAndDenoising": {
"title": "Comprendre l'Image-à-Image et le Débruitage",
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
},
"howDoIOutpaint": {
"title": "Comment effectuer un outpainting?",
"description": "Guide pour l'extension au-delà des bordures de l'image originale."
}
},
"gettingStarted": "Commencer",
@@ -2436,10 +2237,5 @@
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
"supportVideos": "Vidéos d'assistance",
"controlCanvas": "Contrôler la toile"
},
"modelCache": {
"clear": "Effacer le cache du modèle",
"clearSucceeded": "Cache du modèle effacée",
"clearFailed": "Problème de nettoyage du cache du modèle"
}
}

View File

@@ -97,19 +97,7 @@
"ok": "Ok",
"generating": "Generazione",
"loadingModel": "Caricamento del modello",
"warnings": "Avvisi",
"step": "Passo",
"values": "Valori",
"start": "Inizio",
"end": "Fine",
"resetToDefaults": "Ripristina le impostazioni predefinite",
"seed": "Seme",
"combinatorial": "Combinatorio",
"count": "Quantità",
"board": "Bacheca",
"layout": "Schema",
"row": "Riga",
"column": "Colonna"
"warnings": "Avvisi"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -120,6 +108,7 @@
"deleteImage_many": "Elimina {{count}} immagini",
"deleteImage_other": "Elimina {{count}} immagini",
"deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.",
"assets": "Risorse",
"autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic",
"featuresWillReset": "Se elimini questa immagine, quelle funzionalità verranno immediatamente ripristinate.",
"loading": "Caricamento in corso",
@@ -176,9 +165,7 @@
"imagesTab": "Immagini create e salvate in Invoke.",
"assetsTab": "File che hai caricato per usarli nei tuoi progetti.",
"boardsSettings": "Impostazioni Bacheche",
"imagesSettings": "Impostazioni Immagini Galleria",
"assets": "Risorse",
"images": "Immagini"
"imagesSettings": "Impostazioni Immagini Galleria"
},
"hotkeys": {
"searchHotkeys": "Cerca tasti di scelta rapida",
@@ -681,7 +668,7 @@
"addingImagesTo": "Aggiungi immagini a",
"systemDisconnected": "Sistema disconnesso",
"missingNodeTemplate": "Modello di nodo mancante",
"missingInputForField": "ingresso mancante",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: ingresso mancante",
"missingFieldTemplate": "Modello di campo mancante",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), altezza riquadro è {{height}}",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza riquadro è {{width}}",
@@ -694,22 +681,11 @@
"canvasIsRasterizing": "La tela è occupata (sta rasterizzando)",
"canvasIsCompositing": "La tela è occupata (in composizione)",
"canvasIsFiltering": "La tela è occupata (sta filtrando)",
"collectionTooManyItems": "troppi elementi, massimo {{maxItems}}",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi elementi, massimo {{maxItems}}",
"canvasIsSelectingObject": "La tela è occupata (selezione dell'oggetto)",
"collectionTooFewItems": "troppi pochi elementi, minimo {{minItems}}",
"fluxModelMultipleControlLoRAs": "È possibile utilizzare solo 1 Controllo LoRA alla volta",
"collectionNumberGTMax": "{{value}} > {{maximum}} (incr max)",
"collectionStringTooLong": "troppo lungo, massimo {{maxLength}}",
"batchNodeNotConnected": "Nodo Lotto non connesso: {{label}}",
"batchNodeEmptyCollection": "Alcuni nodi lotto hanno raccolte vuote",
"emptyBatches": "lotti vuoti",
"batchNodeCollectionSizeMismatch": "Le dimensioni della raccolta nel Lotto {{batchGroupId}} non corrispondono",
"collectionStringTooShort": "troppo corto, minimo {{minLength}}",
"collectionNumberNotMultipleOf": "{{value}} non è multiplo di {{multipleOf}}",
"collectionNumberLTMin": "{{value}} < {{minimum}} (incr min)",
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (excl max)",
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (excl min)",
"collectionEmpty": "raccolta vuota"
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi pochi elementi, minimo {{minItems}}",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} raccolta vuota",
"fluxModelMultipleControlLoRAs": "È possibile utilizzare solo 1 Controllo LoRA alla volta"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",
@@ -837,13 +813,7 @@
"imagesWillBeAddedTo": "Le immagini caricate verranno aggiunte alle risorse della bacheca {{boardName}}.",
"uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG o JPEG.",
"uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
"outOfMemoryErrorDescLocal": "Segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent> per ridurre gli OOM.",
"pasteFailed": "Incolla non riuscita",
"pasteSuccess": "Incollato su {{destination}}",
"unableToCopy": "Impossibile copiare",
"unableToCopyDesc": "Il tuo browser non supporta l'accesso agli appunti. Gli utenti di Firefox potrebbero risolvere il problema seguendo ",
"unableToCopyDesc_theseSteps": "questi passaggi"
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG."
},
"accessibility": {
"invokeProgressBar": "Barra di avanzamento generazione",
@@ -926,8 +896,8 @@
"boolean": "Booleani",
"node": "Nodo",
"collection": "Raccolta",
"cannotConnectInputToInput": "Impossibile collegare ingresso a ingresso",
"cannotConnectOutputToOutput": "Impossibile collegare uscita ad uscita",
"cannotConnectInputToInput": "Impossibile collegare Input a Input",
"cannotConnectOutputToOutput": "Impossibile collegare Output ad Output",
"cannotConnectToSelf": "Impossibile connettersi a se stesso",
"mismatchedVersion": "Nodo non valido: il nodo {{node}} di tipo {{type}} ha una versione non corrispondente (provare ad aggiornare?)",
"loadingNodes": "Caricamento nodi...",
@@ -1002,39 +972,7 @@
"noWorkflows": "Nessun flusso di lavoro",
"workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>.",
"specialDesc": "Questa invocazione comporta una gestione speciale nell'applicazione. Ad esempio, i nodi Lotto vengono utilizzati per mettere in coda più grafici da un singolo flusso di lavoro.",
"internalDesc": "Questa invocazione è utilizzata internamente da Invoke. Potrebbe subire modifiche significative durante gli aggiornamenti dell'app e potrebbe essere rimossa in qualsiasi momento.",
"addItem": "Aggiungi elemento",
"generateValues": "Genera valori",
"generatorNoValues": "vuoto",
"linearDistribution": "Distribuzione lineare",
"parseString": "Analizza stringa",
"splitOn": "Diviso su",
"noBatchGroup": "nessun gruppo",
"generatorLoading": "caricamento",
"generatorLoadFromFile": "Carica da file",
"dynamicPromptsRandom": "Prompt dinamici (casuali)",
"dynamicPromptsCombinatorial": "Prompt dinamici (combinatori)",
"floatRangeGenerator": "Generatore di intervalli di numeri in virgola mobile",
"integerRangeGenerator": "Generatore di intervalli di numeri interi",
"uniformRandomDistribution": "Distribuzione casuale uniforme",
"generatorNRandomValues_one": "{{count}} valore casuale",
"generatorNRandomValues_many": "{{count}} valori casuali",
"generatorNRandomValues_other": "{{count}} valori casuali",
"arithmeticSequence": "Sequenza aritmetica",
"nodeName": "Nome del nodo",
"loadWorkflowDesc": "Caricare il flusso di lavoro?",
"loadWorkflowDesc2": "Il flusso di lavoro corrente presenta modifiche non salvate.",
"downloadWorkflowError": "Errore durante lo scaricamento del flusso di lavoro",
"deletedMissingNodeFieldFormElement": "Campo modulo mancante eliminato: nodo {{nodeId}} campo {{fieldName}}",
"loadingTemplates": "Caricamento {{name}}",
"unableToUpdateNode": "Aggiornamento del nodo non riuscito: nodo {{node}} di tipo {{type}} (potrebbe essere necessario eliminarlo e ricrearlo)",
"description": "Descrizione",
"generatorImagesCategory": "Categoria",
"generatorImages_one": "{{count}} immagine",
"generatorImages_many": "{{count}} immagini",
"generatorImages_other": "{{count}} immagini",
"generatorImagesFromBoard": "Immagini dalla Bacheca",
"missingSourceOrTargetNode": "Nodo sorgente o di destinazione mancante"
"internalDesc": "Questa invocazione è utilizzata internamente da Invoke. Potrebbe subire modifiche significative durante gli aggiornamenti dell'app e potrebbe essere rimossa in qualsiasi momento."
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1156,14 +1094,7 @@
"generation": "Generazione",
"other": "Altro",
"gallery": "Galleria",
"batchSize": "Dimensione del lotto",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Vuoi davvero annullare tutti gli elementi in coda in sospeso?",
"confirm": "Conferma",
"cancelAllExceptCurrentQueueItemAlertDialog": "L'annullamento di tutti gli elementi della coda, eccetto quello corrente, interromperà gli elementi in sospeso ma consentirà il completamento di quello in corso.",
"cancelAllExceptCurrentTooltip": "Annulla tutto tranne l'elemento corrente",
"retrySucceeded": "Elemento rieseguito",
"retryItem": "Riesegui elemento",
"retryFailed": "Problema riesecuzione elemento"
"batchSize": "Dimensione del lotto"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1207,8 +1138,7 @@
"dynamicPrompts": "Prompt dinamici",
"promptsPreview": "Anteprima dei prompt",
"showDynamicPrompts": "Mostra prompt dinamici",
"loading": "Generazione prompt dinamici...",
"promptsToGenerate": "Prompt da generare"
"loading": "Generazione prompt dinamici..."
},
"popovers": {
"paramScheduler": {
@@ -1741,38 +1671,7 @@
"download": "Scarica",
"copyShareLink": "Copia Condividi Link",
"copyShareLinkForWorkflow": "Copia Condividi Link del Flusso di lavoro",
"delete": "Elimina",
"openLibrary": "Apri la libreria",
"builder": {
"resetAllNodeFields": "Reimposta tutti i campi del nodo",
"row": "Riga",
"nodeField": "Campo del nodo",
"slider": "Cursore",
"emptyRootPlaceholderEditMode": "Per iniziare, trascina qui un elemento del modulo o un campo nodo.",
"containerPlaceholder": "Contenitore vuoto",
"headingPlaceholder": "Titolo vuoto",
"column": "Colonna",
"nodeFieldTooltip": "Per aggiungere un campo nodo, fare clic sul piccolo pulsante con il segno più sul campo nell'editor del flusso di lavoro, oppure trascinare il campo in base al suo nome nel modulo.",
"label": "Etichetta",
"deleteAllElements": "Elimina tutti gli elementi del modulo",
"addToForm": "Aggiungi al Modulo",
"layout": "Schema",
"builder": "Generatore Modulo",
"zoomToNode": "Zoom sul nodo",
"component": "Componente",
"showDescription": "Mostra Descrizione",
"singleLine": "Linea singola",
"multiLine": "Linea Multipla",
"both": "Entrambi",
"emptyRootPlaceholderViewMode": "Fare clic su Modifica per iniziare a creare un modulo per questo flusso di lavoro.",
"textPlaceholder": "Testo vuoto",
"workflowBuilderAlphaWarning": "Il generatore di flussi di lavoro è attualmente in versione alpha. Potrebbero esserci cambiamenti radicali prima della versione stabile.",
"heading": "Intestazione",
"divider": "Divisore",
"container": "Contenitore",
"text": "Testo",
"numberInput": "Ingresso numerico"
}
"delete": "Elimina"
},
"accordions": {
"compositing": {
@@ -2008,43 +1907,7 @@
},
"forMoreControl": "Per un maggiore controllo, fare clic su Avanzate qui sotto.",
"advanced": "Avanzate",
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}.",
"img_blur": {
"label": "Sfoca immagine",
"description": "Sfoca il livello selezionato.",
"blur_type": "Tipo di sfocatura",
"blur_radius": "Raggio",
"gaussian_type": "Gaussiana"
},
"img_noise": {
"size": "Dimensione del rumore",
"salt_and_pepper_type": "Sale e pepe",
"gaussian_type": "Gaussiano",
"noise_color": "Rumore colorato",
"description": "Aggiunge rumore al livello selezionato.",
"noise_type": "Tipo di rumore",
"label": "Aggiungi rumore",
"noise_amount": "Quantità"
},
"adjust_image": {
"description": "Regola il canale selezionato di un'immagine.",
"alpha": "Alfa (RGBA)",
"label": "Regola l'immagine",
"blue": "Blu (RGBA)",
"luminosity": "Luminosità (LAB)",
"channel": "Canale",
"value_setting": "Valore",
"scale_values": "Scala i valori",
"red": "Rosso (RGBA)",
"green": "Verde (RGBA)",
"cyan": "Ciano (CMYK)",
"magenta": "Magenta (CMYK)",
"yellow": "Giallo (CMYK)",
"black": "Nero (CMYK)",
"hue": "Tonalità (HSV)",
"saturation": "Saturazione (HSV)",
"value": "Valore (HSV)"
}
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}."
},
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
@@ -2151,10 +2014,7 @@
"saveCanvasToGallery": "Salva la Tela nella Galleria",
"saveToGalleryGroup": "Salva nella Galleria",
"newInpaintMask": "Nuova maschera Inpaint",
"newRegionalGuidance": "Nuova Guida Regionale",
"copyToClipboard": "Copia negli appunti",
"copyCanvasToClipboard": "Copia la tela negli appunti",
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
"newRegionalGuidance": "Nuova Guida Regionale"
},
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
@@ -2194,7 +2054,7 @@
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
@@ -2217,20 +2077,6 @@
"ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
"ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
"rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato"
},
"pasteTo": "Incolla su",
"pasteToBboxDesc": "Nuovo livello (nel riquadro di delimitazione)",
"pasteToAssets": "Risorse",
"copyRegionError": "Errore durante la copia di {{region}}",
"pasteToAssetsDesc": "Incolla in Risorse",
"pasteToBbox": "Riquadro di delimitazione",
"pasteToCanvas": "Tela",
"pasteToCanvasDesc": "Nuovo livello (nella Tela)",
"pastedTo": "Incollato su {{destination}}",
"regionCopiedToClipboard": "{{region}} Copiato negli appunti",
"errors": {
"unableToFindImage": "Impossibile trovare l'immagine",
"unableToLoadImage": "Impossibile caricare l'immagine"
}
},
"ui": {
@@ -2320,9 +2166,10 @@
"newUserExperience": {
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
"toGetStarted": "Per iniziare, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
"noModelsInstalled": "Sembra che non hai installato alcun modello! Puoi <DownloadStarterModelsButton>scaricare un pacchetto di modelli di avvio</DownloadStarterModelsButton> o <ImportModelsButton>importare modelli</ImportModelsButton>.",
"toGetStartedLocal": "Per iniziare, assicurati di scaricare o importare i modelli necessari per eseguire Invoke. Quindi, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
"lowVRAMMode": "Per prestazioni ottimali, segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent>."
"importModels": "Importa modelli",
"downloadStarterModels": "Scarica i modelli per iniziare",
"noModelsInstalled": "Sembra che tu non abbia installato alcun modello",
"toGetStartedLocal": "Per iniziare, assicurati di scaricare o importare i modelli necessari per eseguire Invoke. Quindi, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>."
},
"whatsNew": {
"whatsNewInInvoke": "Novità in Invoke",
@@ -2330,8 +2177,7 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Editor del flusso di lavoro: nuovo generatore di moduli trascina-e-rilascia per una creazione più facile del flusso di lavoro.",
"Altri miglioramenti: messa in coda dei lotti più rapida, migliore ampliamento, selettore colore migliorato e nodi metadati."
"<StrongComponent>Livelli di controllo Flux</StrongComponent>: nuovi modelli di controllo per il rilevamento dei bordi e la mappatura della profondità sono ora supportati per i modelli di Flux dev."
]
},
"system": {
@@ -2421,10 +2267,5 @@
"watch": "Guarda",
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
},
"modelCache": {
"clear": "Cancella la cache del modello",
"clearSucceeded": "Cache del modello cancellata",
"clearFailed": "Problema durante la cancellazione della cache del modello"
}
}

View File

@@ -32,29 +32,29 @@
"learnMore": "もっと学ぶ",
"random": "ランダム",
"batch": "バッチマネージャー",
"advanced": "高度",
"advanced": "高度な設定",
"created": "作成済",
"green": "緑",
"blue": "青",
"alpha": "アルファ",
"outpaint": "outpaint",
"outpaint": "アウトペイント",
"unknown": "不明",
"updated": "更新済",
"add": "追加",
"ai": "ai",
"ai": "AI",
"copyError": "$t(gallery.copy) エラー",
"data": "データ",
"template": "テンプレート",
"red": "赤",
"or": "または",
"checkpoint": "Checkpoint",
"checkpoint": "チェックポイント",
"direction": "方向",
"simple": "シンプル",
"save": "保存",
"saveAs": "名前をつけて保存",
"somethingWentWrong": "何かの問題が発生しました",
"details": "詳細",
"inpaint": "inpaint",
"inpaint": "インペイント",
"delete": "削除",
"nextPage": "次のページ",
"copy": "コピー",
@@ -70,12 +70,12 @@
"unknownError": "未知のエラー",
"orderBy": "並び順:",
"enabled": "有効",
"notInstalled": "未 $t(common.installed)",
"notInstalled": "未インストール",
"positivePrompt": "ポジティブプロンプト",
"negativePrompt": "ネガティブプロンプト",
"selected": "選択済み",
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
"beta": "Beta",
"beta": "ベータ",
"disabled": "無効",
"editor": "エディタ",
"safetensors": "Safetensors",
@@ -93,27 +93,7 @@
"reset": "リセット",
"none": "なし",
"new": "新規",
"close": "閉じる",
"warnings": "警告",
"dontShowMeThese": "次回から表示しない",
"goTo": "移動",
"generating": "生成中",
"loadingModel": "モデルをロード中",
"layout": "レイアウト",
"step": "ステップ",
"start": "開始",
"count": "回数",
"end": "終了",
"min": "最小",
"max": "最大",
"values": "値",
"resetToDefaults": "デフォルトに戻す",
"row": "行",
"column": "列",
"board": "ボード",
"seed": "シード",
"combinatorial": "組み合わせ",
"aboutHeading": "想像力をこの手に"
"close": "閉じる"
},
"gallery": {
"galleryImageSize": "画像のサイズ",
@@ -126,10 +106,11 @@
"featuresWillReset": "この画像を削除すると、これらの機能は即座にリセットされます。",
"unstarImage": "スターを外す",
"loading": "ロード中",
"assets": "アセット",
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
"drop": "ドロップ",
"dropOrUpload": "$t(gallery.drop) またはアップロード",
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImage_other": "画像を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
"unableToLoad": "ギャラリーをロードできません",
@@ -175,12 +156,7 @@
"displayBoardSearch": "ボード検索",
"displaySearch": "画像を検索",
"boardsSettings": "ボード設定",
"imagesSettings": "ギャラリー画像設定",
"selectAllOnPage": "ページ上のすべてを選択",
"images": "画像",
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
"imagesTab": "Invoke内で作成および保存された画像。",
"assets": "アセット"
"imagesSettings": "ギャラリー画像設定"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
@@ -205,121 +181,44 @@
},
"canvas": {
"redo": {
"title": "やり直し",
"desc": "最後のキャンバス操作をやり直します。"
"title": "やり直し"
},
"transformSelected": {
"title": "変形",
"desc": "選択したレイヤーを変形します。"
"title": "変形"
},
"undo": {
"title": "取り消し",
"desc": "最後のキャンバス操作を取り消します。"
"title": "取り消し"
},
"selectEraserTool": {
"title": "消しゴムツール",
"desc": "消しゴムツールを選択します。"
"title": "消しゴムツール"
},
"cancelTransform": {
"title": "変形をキャンセル",
"desc": "保留中の変形をキャンセルします。"
"title": "変形をキャンセル"
},
"resetSelected": {
"title": "レイヤーをリセット",
"desc": "選択したレイヤーをリセットします。この操作はInpaint MaskおよびRegional Guidanceにのみ適用されます。"
"title": "レイヤーをリセット"
},
"applyTransform": {
"title": "変形を適用",
"desc": "保留中の変形を選択したレイヤーに適用します。"
"title": "変形を適用"
},
"selectColorPickerTool": {
"title": "スポイトツール",
"desc": "スポイトツールを選択します。"
"title": "スポイトツール"
},
"fitBboxToCanvas": {
"title": "バウンディングボックスをキャンバスにフィット",
"desc": "バウンディングボックスがキャンバスに収まるように表示を拡大、位置調整します。"
"title": "バウンディングボックスをキャンバスにフィット"
},
"selectBrushTool": {
"title": "ブラシツール",
"desc": "ブラシツールを選択します。"
"title": "ブラシツール"
},
"selectMoveTool": {
"title": "移動ツール",
"desc": "移動ツールを選択します。"
"title": "移動ツール"
},
"selectBboxTool": {
"title": "バウンディングボックスツール",
"desc": "バウンディングボックスツールを選択します。"
"title": "バウンディングボックスツール"
},
"title": "キャンバス",
"fitLayersToCanvas": {
"title": "レイヤーをキャンバスにフィット",
"desc": "すべての表示レイヤーがキャンバスに収まるように表示を拡大、位置調整します。"
},
"setZoomTo400Percent": {
"desc": "キャンバスのズームを400%に設定します。",
"title": "400%にズーム"
},
"setZoomTo800Percent": {
"title": "800%にズーム",
"desc": "キャンバスのズームを800%に設定します。"
},
"quickSwitch": {
"title": "レイヤーのクイックスイッチ",
"desc": "最後に選択した2つのレイヤー間を切り替えます。レイヤーがブックマークされている場合、常にそのレイヤーと最後に選択したブックマークされていないレイヤーの間を切り替えます。"
},
"nextEntity": {
"title": "次のレイヤー",
"desc": "リスト内の次のレイヤーを選択します。"
},
"filterSelected": {
"title": "フィルター",
"desc": "選択したレイヤーをフィルターします。RasterおよびControlレイヤーにのみ適用されます。"
},
"prevEntity": {
"desc": "リスト内の前のレイヤーを選択します。",
"title": "前のレイヤー"
},
"setFillToWhite": {
"title": "ツール色を白に設定",
"desc": "現在のツールの色を白色に設定します。"
},
"selectViewTool": {
"title": "表示ツール",
"desc": "表示ツールを選択します。"
},
"setZoomTo100Percent": {
"title": "100%にズーム",
"desc": "キャンバスのズームを100%に設定します。"
},
"deleteSelected": {
"desc": "選択したレイヤーを削除します。",
"title": "レイヤーを削除"
},
"cancelFilter": {
"desc": "保留中のフィルターをキャンセルします。",
"title": "フィルターをキャンセル"
},
"applyFilter": {
"title": "フィルターを適用",
"desc": "保留中のフィルターを選択したレイヤーに適用します。"
},
"setZoomTo200Percent": {
"title": "200%にズーム",
"desc": "キャンバスのズームを200%に設定します。"
},
"decrementToolWidth": {
"title": "ツール幅を縮小する",
"desc": "選択中のブラシまたは消しゴムツールの幅を減少させます。"
},
"incrementToolWidth": {
"desc": "選択中のブラシまたは消しゴムツールの幅を増加させます。",
"title": "ツール幅を増加する"
},
"selectRectTool": {
"title": "矩形ツール",
"desc": "矩形ツールを選択します。"
"title": "レイヤーをキャンバスにフィット"
}
},
"workflows": {
@@ -328,13 +227,6 @@
},
"redo": {
"title": "やり直し"
},
"title": "ワークフロー",
"pasteSelection": {
"title": "ペースト"
},
"copySelection": {
"title": "コピー"
}
},
"app": {
@@ -344,62 +236,16 @@
},
"title": "アプリケーション",
"invoke": {
"title": "生成",
"desc": "生成をキューに追加し、キューの末尾に加えます。"
"title": "Invoke"
},
"cancelQueueItem": {
"title": "キャンセル",
"desc": "現在処理中のキュー項目をキャンセルします。"
"title": "キャンセル"
},
"clearQueue": {
"title": "キューをクリア",
"desc": "すべてのキュー項目をキャンセルして消去します。"
},
"selectCanvasTab": {
"desc": "キャンバスタブを選択します。",
"title": "キャンバスタブを選択"
},
"selectUpscalingTab": {
"desc": "アップスケーリングタブを選択します。",
"title": "アップスケーリングタブを選択"
},
"toggleRightPanel": {
"desc": "右パネルを表示または非表示。",
"title": "右パネルをトグル"
},
"selectModelsTab": {
"title": "モデルタブを選択",
"desc": "モデルタブを選択します。"
},
"invokeFront": {
"desc": "生成をキューに追加し、キューの先頭に加えます。",
"title": "生成(先頭)"
},
"resetPanelLayout": {
"title": "パネルレイアウトをリセット",
"desc": "左パネルと右パネルをデフォルトのサイズとレイアウトにリセットします。"
},
"togglePanels": {
"desc": "左パネルと右パネルを合わせて表示または非表示。",
"title": "パネルをトグル"
},
"selectWorkflowsTab": {
"desc": "ワークフロータブを選択します。",
"title": "ワークフロータブを選択"
},
"selectQueueTab": {
"title": "キュータブを選択",
"desc": "キュータブを選択します。"
},
"focusPrompt": {
"title": "プロンプトにフォーカス",
"desc": "カーソルをポジティブプロンプト欄に移動します。"
"title": "キューをクリア"
}
},
"hotkeys": "ホットキー",
"gallery": {
"title": "ギャラリー"
}
"hotkeys": "ホットキー"
},
"modelManager": {
"modelManager": "モデルマネージャ",
@@ -410,13 +256,13 @@
"name": "名前",
"description": "概要",
"config": "コンフィグ",
"repo_id": "リポジトリID",
"repo_id": "Repo ID",
"width": "幅",
"height": "高さ",
"addModel": "モデルを追加",
"availableModels": "モデルを有効化",
"search": "検索",
"load": "ロード",
"load": "Load",
"active": "active",
"selected": "選択済",
"delete": "削除",
@@ -436,7 +282,7 @@
"modelConverted": "モデル変換が完了しました",
"predictionType": "予測タイプSD 2.x モデルおよび一部のSD 1.x モデル用)",
"selectModel": "モデルを選択",
"advanced": "高度",
"advanced": "高度な設定",
"modelDeleted": "モデルが削除されました",
"convertToDiffusersHelpText2": "このプロセスでは、モデルマネージャーのエントリーを同じモデルのディフューザーバージョンに置き換えます。",
"modelUpdateFailed": "モデル更新が失敗しました",
@@ -449,20 +295,7 @@
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
"cancel": "キャンセル",
"uploadImage": "画像をアップロード",
"addModels": "モデルを追加",
"modelName": "モデル名",
"source": "ソース",
"path": "パス",
"modelSettings": "モデル設定",
"vae": "VAE",
"huggingFace": "HuggingFace",
"huggingFaceRepoID": "HuggingFace リポジトリID",
"metadata": "メタデータ",
"loraModels": "LoRA",
"edit": "編集",
"install": "インストール",
"huggingFacePlaceholder": "owner/model-name",
"variant": "Variant"
"addModels": "モデルを追加"
},
"parameters": {
"images": "画像",
@@ -473,7 +306,7 @@
"shuffle": "シャッフル",
"strength": "強度",
"upscaling": "アップスケーリング",
"scale": "スケール",
"scale": "Scale",
"scaleBeforeProcessing": "処理前のスケール",
"scaledWidth": "幅のスケール",
"scaledHeight": "高さのスケール",
@@ -482,7 +315,7 @@
"useSeed": "シード値を使用",
"useAll": "すべてを使用",
"info": "情報",
"showOptionsPanel": "サイドパネルを表示 (O or T)",
"showOptionsPanel": "オプションパネルを表示",
"iterations": "生成回数",
"general": "基本設定",
"setToOptimalSize": "サイズをモデルに最適化",
@@ -496,29 +329,16 @@
"useSize": "サイズを使用",
"postProcessing": "ポストプロセス (Shift + U)",
"denoisingStrength": "ノイズ除去強度",
"recallMetadata": "メタデータを再使用",
"copyImage": "画像をコピー",
"positivePromptPlaceholder": "ポジティブプロンプト",
"negativePromptPlaceholder": "ネガティブプロンプト",
"type": "タイプ",
"cancel": {
"cancel": "キャンセル"
},
"cfgScale": "CFGスケール",
"tileSize": "タイルサイズ",
"coherenceMode": "モード"
"recallMetadata": "メタデータを再使用"
},
"settings": {
"models": "モデル",
"displayInProgress": "生成中の画像を表示",
"displayInProgress": "生成中の画像を表示する",
"confirmOnDelete": "削除時に確認",
"resetWebUI": "WebUIをリセット",
"resetWebUIDesc1": "WebUIのリセットは、画像と保存された設定のキャッシュをリセットするだけです。画像を削除するわけではありません。",
"resetWebUIDesc2": "もしギャラリーに画像が表示されないなど、何か問題が発生した場合はGitHubにissueを提出する前にリセットを試してください。",
"resetComplete": "WebUIはリセットされました。",
"ui": "ユーザーインターフェイス",
"beta": "ベータ",
"developer": "開発者"
"resetComplete": "WebUIはリセットされました。F5を押して再読み込みしてください。"
},
"toast": {
"uploadFailed": "アップロード失敗",
@@ -526,8 +346,7 @@
"imageUploadFailed": "画像のアップロードに失敗しました",
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
"sentToUpscale": "アップスケーラーに転送しました",
"imageUploaded": "画像をアップロードしました",
"serverError": "サーバーエラー"
"imageUploaded": "画像をアップロードしました"
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -538,7 +357,7 @@
"menu": "メニュー",
"createIssue": "問題を報告",
"resetUI": "$t(accessibility.reset) UI",
"mode": "モード",
"mode": "モード:",
"about": "Invoke について",
"submitSupportTicket": "サポート依頼を送信する",
"uploadImages": "画像をアップロード",
@@ -555,20 +374,7 @@
"positivePrompt": "ポジティブプロンプト",
"strength": "Image to Image 強度",
"recallParameters": "パラメータを再使用",
"recallParameter": "{{label}} を再使用",
"imageDimensions": "画像サイズ",
"imageDetails": "画像の詳細",
"model": "モデル",
"allPrompts": "すべてのプロンプト",
"cfgScale": "CFGスケール",
"createdBy": "作成:",
"metadata": "メタデータ",
"height": "高さ",
"negativePrompt": "ネガティブプロンプト",
"generationMode": "生成モード",
"vae": "VAE",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"canvasV2Metadata": "キャンバス"
"recallParameter": "{{label}} を再使用"
},
"queue": {
"queueEmpty": "キューが空です",
@@ -600,7 +406,7 @@
"batchQueuedDesc_other": "{{count}} セッションをキューの{{direction}}に追加しました",
"graphQueued": "グラフをキューに追加しました",
"batch": "バッチ",
"clearQueueAlertDialog": "キューをクリアすると、処理中の項目は直ちにキャンセルされ、キューは完全にクリアされます。保留中のフィルターもキャンセルされます。",
"clearQueueAlertDialog": "キューをクリアすると、処理中のアイテムは直ちにキャンセルされ、キューは完全にクリアされます。",
"pending": "保留中",
"resumeFailed": "処理の再開に問題があります",
"clear": "クリア",
@@ -618,7 +424,7 @@
"enqueueing": "バッチをキューに追加",
"cancelBatchFailed": "バッチのキャンセルに問題があります",
"clearQueueAlertDialog2": "キューをクリアしてもよろしいですか?",
"item": "項目",
"item": "アイテム",
"graphFailedToQueue": "グラフをキューに追加できませんでした",
"batchFieldValues": "バッチの詳細",
"openQueue": "キューを開く",
@@ -634,17 +440,7 @@
"upscaling": "アップスケール",
"generation": "生成",
"other": "その他",
"gallery": "ギャラリー",
"cancelAllExceptCurrentQueueItemAlertDialog2": "すべての保留中のキュー項目をキャンセルしてもよいですか?",
"cancelAllExceptCurrentTooltip": "現在の項目を除いてすべてキャンセル",
"origin": "先頭",
"destination": "宛先",
"confirm": "確認",
"retryItem": "項目をリトライ",
"batchSize": "バッチサイズ",
"retryFailed": "項目のリトライに問題があります",
"cancelAllExceptCurrentQueueItemAlertDialog": "現在の項目を除くすべてのキュー項目をキャンセルすると、保留中の項目は停止しますが、進行中の項目は完了します。",
"retrySucceeded": "項目がリトライされました"
"gallery": "ギャラリー"
},
"models": {
"noMatchingModels": "一致するモデルがありません",
@@ -653,14 +449,13 @@
"noModelsAvailable": "使用可能なモデルがありません",
"selectModel": "モデルを選択してください",
"concepts": "コンセプト",
"addLora": "LoRAを追加",
"lora": "LoRA"
"addLora": "LoRAを追加"
},
"nodes": {
"addNode": "ノードを追加",
"boolean": "ブーリアン",
"addNodeToolTip": "ノードを追加 (Shift+A, Space)",
"missingTemplate": "Invalid node: タイプ {{type}} のノード {{node}} にテンプレートがりません(未インストール?)",
"missingTemplate": "テンプレートが見つかりません",
"loadWorkflow": "ワークフローを読み込み",
"hideLegendNodes": "フィールドタイプの凡例を非表示",
"float": "浮動小数点",
@@ -671,7 +466,7 @@
"currentImageDescription": "ノードエディタ内の現在の画像を表示",
"downloadWorkflow": "ワークフローのJSONをダウンロード",
"fieldTypesMustMatch": "フィールドタイプが一致している必要があります",
"edge": "エッジ",
"edge": "輪郭",
"animatedEdgesHelp": "選択したエッジおよび選択したノードに接続されたエッジをアニメーション化します",
"cannotDuplicateConnection": "重複した接続は作れません",
"noWorkflow": "ワークフローがありません",
@@ -690,20 +485,7 @@
"cannotConnectToSelf": "自身のノードには接続できません",
"colorCodeEdges": "カラー-Code Edges",
"loadingNodes": "ノードを読み込み中...",
"scheduler": "スケジューラー",
"version": "バージョン",
"edit": "編集",
"nodeVersion": "ノードバージョン",
"workflowTags": "タグ",
"string": "文字列",
"workflowVersion": "バージョン",
"workflowAuthor": "作者",
"ipAdapter": "IP-Adapter",
"notes": "ノート",
"workflow": "ワークフロー",
"workflowName": "名前",
"workflowNotes": "ノート",
"enum": "Enum"
"scheduler": "スケジューラー"
},
"boards": {
"autoAddBoard": "自動追加するボード",
@@ -725,7 +507,7 @@
"deleteBoard": "ボードの削除",
"deleteBoardAndImages": "ボードと画像の削除",
"deleteBoardOnly": "ボードのみ削除",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
"hideBoards": "ボードを隠す",
"assetsWithCount_other": "{{count}} のアセット",
@@ -737,12 +519,7 @@
"archiveBoard": "ボードをアーカイブ",
"archived": "アーカイブ完了",
"unarchiveBoard": "アーカイブされていないボード",
"imagesWithCount_other": "{{count}} の画像",
"updateBoardError": "ボード更新エラー",
"selectedForAutoAdd": "自動追加に選択済み",
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
"noBoards": "{{boardType}} ボードがありません",
"viewBoards": "ボードを表示"
"imagesWithCount_other": "{{count}} の画像"
},
"invocationCache": {
"invocationCache": "呼び出しキャッシュ",
@@ -794,57 +571,6 @@
},
"paramAspect": {
"heading": "縦横比"
},
"refinerSteps": {
"heading": "ステップ"
},
"paramVAE": {
"heading": "VAE"
},
"scale": {
"heading": "スケール"
},
"refinerScheduler": {
"heading": "スケジューラー"
},
"compositingCoherenceMode": {
"heading": "モード"
},
"paramModel": {
"heading": "モデル"
},
"paramHeight": {
"heading": "高さ"
},
"paramSteps": {
"heading": "ステップ"
},
"ipAdapterMethod": {
"heading": "モード"
},
"paramSeed": {
"heading": "シード"
},
"paramIterations": {
"heading": "生成回数"
},
"controlNet": {
"heading": "ControlNet"
},
"paramWidth": {
"heading": "幅"
},
"lora": {
"heading": "LoRA"
},
"loraWeight": {
"heading": "重み"
},
"patchmatchDownScaleSize": {
"heading": "Downscale"
},
"controlNetWeight": {
"heading": "重み"
}
},
"accordions": {
@@ -854,8 +580,7 @@
"coherenceTab": "コヒーレンスパス"
},
"advanced": {
"title": "高度",
"options": "$t(accordions.advanced.title) オプション"
"title": "高度な設定"
},
"control": {
"title": "コントロール"
@@ -884,11 +609,7 @@
},
"ui": {
"tabs": {
"queue": "キュー",
"canvas": "キャンバス",
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー"
"queue": "キュー"
}
},
"controlLayers": {
@@ -903,8 +624,7 @@
"bboxGroup": "バウンディングボックスから作成",
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像",
"canvasGroup": "キャンバス"
"newRegionalReferenceImage": "新規領域参照画像"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
@@ -925,8 +645,7 @@
"brush": "ブラシ",
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム",
"bbox": "Bbox"
"eraser": "消しゴム"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -944,27 +663,7 @@
"canvas": "キャンバス",
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました",
"controlMode": {
"prompt": "プロンプト"
},
"prompt": "プロンプト",
"settings": {
"snapToGrid": {
"off": "オフ",
"on": "オン"
}
},
"filter": {
"filter": "フィルター",
"spandrel_filter": {
"model": "モデル"
},
"apply": "適用",
"reset": "リセット",
"cancel": "キャンセル"
},
"weight": "重み"
"savedToGalleryOk": "ギャラリーに保存しました"
},
"stylePresets": {
"clearTemplateSelection": "選択したテンプレートをクリア",
@@ -976,54 +675,15 @@
"createPromptTemplate": "プロンプトテンプレートを作成",
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
"searchByName": "名前で検索",
"toggleViewMode": "表示モードを切り替え",
"negativePromptColumn": "'negative_prompt'",
"preview": "プレビュー",
"nameColumn": "'name'",
"type": "タイプ",
"private": "プライベート",
"name": "名称"
"toggleViewMode": "表示モードを切り替え"
},
"upscaling": {
"upscaleModel": "アップスケールモデル",
"postProcessingModel": "ポストプロセスモデル",
"upscale": "アップスケール",
"scale": "スケール"
"upscale": "アップスケール"
},
"sdxl": {
"denoisingStrength": "ノイズ除去強度",
"scheduler": "スケジューラー",
"loading": "ロード中...",
"steps": "ステップ",
"refiner": "Refiner"
},
"modelCache": {
"clear": "モデルキャッシュを消去",
"clearSucceeded": "モデルキャッシュを消去しました",
"clearFailed": "モデルキャッシュの消去中に問題が発生"
},
"workflows": {
"workflows": "ワークフロー",
"ascending": "昇順",
"name": "名前",
"descending": "降順"
},
"system": {
"logNamespaces": {
"system": "システム",
"gallery": "ギャラリー",
"workflows": "ワークフロー",
"models": "モデル",
"canvas": "キャンバス",
"metadata": "メタデータ",
"queue": "キュー"
},
"logLevel": {
"debug": "Debug",
"info": "Info",
"error": "Error",
"fatal": "Fatal",
"warn": "Warn"
}
"scheduler": "スケジューラー"
}
}

View File

@@ -68,6 +68,7 @@
"gallerySettings": "갤러리 설정",
"deleteSelection": "선택 항목 삭제",
"featuresWillReset": "이 이미지를 삭제하면 해당 기능이 즉시 재설정됩니다.",
"assets": "자산",
"noImagesInGallery": "보여줄 이미지가 없음",
"autoSwitchNewImages": "새로운 이미지로 자동 전환",
"loading": "불러오는 중",

View File

@@ -90,6 +90,7 @@
"deleteImage_one": "Verwijder afbeelding",
"deleteImage_other": "",
"deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.",
"assets": "Eigen onderdelen",
"autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken",
"featuresWillReset": "Als je deze afbeelding verwijdert, dan worden deze functies onmiddellijk teruggezet.",
"loading": "Bezig met laden",

View File

@@ -105,6 +105,7 @@
"assetsTab": "Pliki, które wrzuciłeś do użytku w twoich projektach.",
"currentlyInUse": "Ten obraz jest obecnie w użyciu przez następujące funkcje:",
"boardsSettings": "Ustawienia tablic",
"assets": "Aktywy",
"autoAssignBoardOnClick": "Automatycznie przypisz tablicę po kliknięciu",
"copy": "Kopiuj"
},

View File

@@ -106,6 +106,7 @@
"deleteImage_one": "Удалить изображение",
"deleteImage_few": "Удалить {{count}} изображения",
"deleteImage_many": "Удалить {{count}} изображений",
"assets": "Ресурсы",
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
"deleteSelection": "Удалить выделенное",
"featuresWillReset": "Если вы удалите это изображение, эти функции будут немедленно сброшены.",

View File

@@ -195,6 +195,7 @@
},
"gallery": {
"deleteImagePermanent": "Silinen görseller geri getirilemez.",
"assets": "Özkaynaklar",
"autoAssignBoardOnClick": "Tıklanan Panoya Otomatik Atama",
"loading": "Yükleniyor",
"starImage": "Yıldız Koy",

View File

@@ -63,7 +63,7 @@
"compareImage": "So Sánh Ảnh",
"compareHelp4": "Nhấn <Kbd>Z</Kbd> hoặc <Kbd>Esc</Kbd> để thoát.",
"compareHelp3": "Nhấn <Kbd>C</Kbd> để đổi ảnh được so sánh.",
"compareHelp1": "Giữ <Kbd>Alt</Kbd> khi bấm vào ảnh trong thư viện ảnh hoặc dùng phím mũi tên để đổi ảnh dùng cho so sánh.",
"compareHelp1": "Giữ <Kbd>Alt</Kbd> khi bấm vào ảnh trong thư viện hoặc dùng phím mũi tên để đổi ảnh dùng cho so sánh.",
"showArchivedBoards": "Hiển Thị Bảng Được Lưu Trữ",
"drop": "Thả",
"copy": "Sao Chép",
@@ -76,16 +76,17 @@
"deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
"exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
"exitBoardSearch": "Thoát Tìm Kiểm Bảng",
"gallery": "Thư Viện Ảnh",
"gallery": "Thư Viện",
"galleryImageSize": "Kích Thước Ảnh",
"downloadSelection": "Tải xuống Phần Được Lựa Chọn",
"bulkDownloadRequested": "Chuẩn Bị Tải Xuống",
"unableToLoad": "Không Thể Tải Thư viện Ảnh",
"unableToLoad": "Không Thể Tải Thư viện",
"newestFirst": "Mới Nhất Trước",
"showStarredImagesFirst": "Hiển Thị Ảnh Gắn Sao Trước",
"bulkDownloadRequestedDesc": "Yêu cầu tải xuống đang được chuẩn bị. Vui lòng chờ trong giây lát.",
"starImage": "Gắn Sao Cho Ảnh",
"openViewer": "Mở Trình Xem",
"assets": "Tài Nguyên",
"viewerImage": "Trình Xem Ảnh",
"sideBySide": "Cạnh Nhau",
"alwaysShowImageSizeBadge": "Luôn Hiển Thị Kích Thước Ảnh",
@@ -103,7 +104,7 @@
"displaySearch": "Tìm Kiếm Hình Ảnh",
"selectAnImageToCompare": "Chọn Ảnh Để So Sánh",
"slider": "Thanh Trượt",
"gallerySettings": "Cài Đặt Thư Viện Ảnh",
"gallerySettings": "Cài Đặt Thư Viện",
"image": "hình ảnh",
"noImageSelected": "Không Có Ảnh Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
@@ -117,9 +118,7 @@
"unstarImage": "Ngừng Gắn Sao Cho Ảnh",
"compareHelp2": "Nhấn <Kbd>M</Kbd> để tuần hoàn trong chế độ so sánh.",
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh"
"imagesSettings": "Cài Đặt Thư Viện Ảnh"
},
"common": {
"ipAdapter": "IP Adapter",
@@ -221,21 +220,7 @@
"tab": "Tab",
"loadingModel": "Đang Tải Model",
"generating": "Đang Tạo Sinh",
"warnings": "Cảnh Báo",
"count": "Đếm",
"step": "Bước",
"values": "Giá Trị",
"start": "Bắt Đầu",
"end": "Kết Thúc",
"min": "Tối Thiểu",
"max": "Tối Đa",
"resetToDefaults": "Đặt Lại Về Mặc Định",
"seed": "Hạt Giống",
"combinatorial": "Tổ Hợp",
"column": "Cột",
"layout": "Bố Cục",
"row": "Hàng",
"board": "Bảng"
"warnings": "Cảnh Báo"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
@@ -290,7 +275,7 @@
"cancelBatch": "Huỷ Bỏ Lô",
"status": "Trạng Thái",
"pending": "Đang Chờ",
"gallery": "Thư Viện Ảnh",
"gallery": "Thư Viện",
"front": "trước",
"batch": "Lô",
"origin": "Nguồn Gốc",
@@ -309,14 +294,7 @@
"completedIn": "Hoàn tất trong",
"graphQueued": "Đồ Thị Đã Vào Hàng",
"batchQueuedDesc_other": "Thêm {{count}} phiên vào {{direction}} của hàng",
"batchSize": "Kích Thước Lô",
"cancelAllExceptCurrentQueueItemAlertDialog": "Huỷ tất cả mục đang xếp hàng ngoại trừ việc nó sẽ dừng các mục đang chờ nhưng cho phép các mục đang chạy được hoàn tất.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Bạn có chắc muốn huỷ tất cả mục đang chờ?",
"cancelAllExceptCurrentTooltip": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại",
"confirm": "Đồng Ý",
"retrySucceeded": "Mục Đã Thử Lại",
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
"retryItem": "Thử Lại Mục"
"batchSize": "Kích Thước Lô"
},
"hotkeys": {
"canvas": {
@@ -522,16 +500,16 @@
},
"gallery": {
"galleryNavRight": {
"desc": "Sang phải theo mạng lưới thư viện ảnh, chọn hình ảnh đó. Nếu đến cuối hàng, qua hàng tiếp theo. Nếu đến hình ảnh cuối cùng, qua trang tiếp theo.",
"desc": "Sang phải theo mạng lưới thư viện, chọn hình ảnh đó. Nếu đến cuối hàng, qua hàng tiếp theo. Nếu đến hình ảnh cuối cùng, qua trang tiếp theo.",
"title": "Sang Phải"
},
"galleryNavDown": {
"title": "Đi Xuống",
"desc": "Đi xuống theo mạng lưới thư viện ảnh, chọn hình ảnh đó. Nếu xuống cuối cùng trang, sang trang tiếp theo."
"desc": "Đi xuống theo mạng lưới thư viện, chọn hình ảnh đó. Nếu xuống cuối cùng trang, sang trang tiếp theo."
},
"galleryNavLeft": {
"title": "Sang Trái",
"desc": "Sang trái theo mạng lưới thư viện ảnh, chọn hình ảnh đó. Nếu đến đầu hàng, về lại hàng trước đó. Nếu đến hình ảnh đầu tiên, về lại trang trước đó."
"desc": "Sang trái theo mạng lưới thư viện, chọn hình ảnh đó. Nếu đến đầu hàng, về lại hàng trước đó. Nếu đến hình ảnh đầu tiên, về lại trang trước đó."
},
"galleryNavUpAlt": {
"title": "Đi Lên (So Sánh Ảnh)",
@@ -543,7 +521,7 @@
},
"galleryNavUp": {
"title": "Đi Lên",
"desc": "Đi lên theo mạng lưới thư viện ảnh, chọn hình ảnh đó. Nếu lên trên cùng trang, về lại trang trước đó."
"desc": "Đi lên theo mạng lưới thư viện, chọn hình ảnh đó. Nếu lên trên cùng trang, về lại trang trước đó."
},
"galleryNavRightAlt": {
"title": "Sang Phải (So Sánh Ảnh)",
@@ -553,7 +531,7 @@
"title": "Chọn Tất Cả Trên Trang",
"desc": "Chọn tất cả ảnh trên trang hiện tại."
},
"title": "Thư Viện Ảnh",
"title": "Thư Viện",
"galleryNavDownAlt": {
"title": "Đi Xuống (So Sánh Ảnh)",
"desc": "Giống với \"Đi Xuống\", nhưng là chọn ảnh được so sánh, mở chế độ so sánh nếu chưa được mở."
@@ -875,7 +853,7 @@
"removeLinearView": "Xoá Khỏi Chế Độ Xem Tuyến Tính",
"unknownErrorValidatingWorkflow": "Lỗi không rõ khi xác thực workflow",
"unableToLoadWorkflow": "Không Thể Tải Workflow",
"workflowSettings": "Cài Đặt Biên Tập Workflow",
"workflowSettings": "Cài Đặt Trình Biên Tập Viên Workflow",
"workflowVersion": "Phiên Bản",
"unableToGetWorkflowVersion": "Không thể tìm phiên bản của lược đồ workflow",
"collection": "Đa tài nguyên",
@@ -972,7 +950,7 @@
"versionUnknown": " Phiên Bản Không Rõ",
"workflowContact": "Thông Tin Liên Lạc",
"workflowName": "Tên",
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
"saveToGallery": "Lưu Vào Thư Viện",
"connectionWouldCreateCycle": "Kết nối này sẽ tạo ra vòng lặp",
"addNode": "Thêm Node",
"unsupportedAnyOfLength": "quá nhiều dữ liệu hợp nhất: {{count}}",
@@ -987,36 +965,7 @@
"outputFieldTypeParseError": "Không thể phân tích loại dữ liệu đầu ra của {{node}}.{{field}} ({{message}})",
"modelAccessError": "Không thể tìm thấy model {{key}}, chuyển về mặc định",
"internalDesc": "Trình kích hoạt này được dùng bên trong bởi Invoke. Nó có thể phá hỏng thay đổi trong khi cập nhật ứng dụng và có thể bị xoá bất cứ lúc nào.",
"specialDesc": "Trình kích hoạt này có một số xử lý đặc biệt trong ứng dụng. Ví dụ, Node Hàng Loạt được dùng để xếp vào nhiều đồ thị từ một workflow.",
"addItem": "Thêm Mục",
"generateValues": "Cho Ra Giá Trị",
"floatRangeGenerator": "Phạm Vị Tạo Ra Số Thực",
"integerRangeGenerator": "Phạm Vị Tạo Ra Số Nguyên",
"linearDistribution": "Phân Bố Tuyến Tính",
"uniformRandomDistribution": "Phân Bố Ngẫu Nhiên Đồng Nhất",
"parseString": "Phân Tích Chuỗi",
"noBatchGroup": "không có nhóm",
"generatorNoValues": "trống",
"splitOn": "Tách Ở",
"arithmeticSequence": "Cấp Số Cộng",
"generatorNRandomValues_other": "{{count}} giá trị ngẫu nhiên",
"generatorLoading": "đang tải",
"generatorLoadFromFile": "Tải Từ Tệp",
"dynamicPromptsRandom": "Dynamic Prompts (Ngẫu Nhiên)",
"dynamicPromptsCombinatorial": "Dynamic Prompts (Tổ Hợp)",
"missingSourceOrTargetNode": "Thiếu nguồn hoặc node mục tiêu",
"missingSourceOrTargetHandle": "Thiếu nguồn hoặc mục tiêu xử lý",
"deletedMissingNodeFieldFormElement": "Xóa vùng nhập bị thiếu: vùng {{fieldName}} của node {{nodeId}}",
"description": "Mô Tả",
"loadWorkflowDesc": "Tải workflow?",
"loadWorkflowDesc2": "Workflow hiện tại của bạn có những điều chỉnh chưa được lưu.",
"loadingTemplates": "Đang Tải {{name}}",
"nodeName": "Tên Node",
"unableToUpdateNode": "Cập nhật node thất bại: node {{node}} thuộc dạng {{type}} (có thể cần xóa và tạo lại)",
"downloadWorkflowError": "Lỗi tải xuống workflow",
"generatorImagesFromBoard": "Ảnh Từ Bảng",
"generatorImagesCategory": "Phân Loại",
"generatorImages_other": "{{count}} ảnh"
"specialDesc": "Trình kích hoạt này có một số xử lý đặc biệt trong ứng dụng. Ví dụ, Node Hàng Loạt được dùng để xếp vào nhiều đồ thị từ một workflow."
},
"popovers": {
"paramCFGRescaleMultiplier": {
@@ -1484,25 +1433,13 @@
"missingNodeTemplate": "Thiếu mẫu trình bày node",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều dài hộp giới hạn là {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"missingInputForField": "thiếu đầu vào",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: thiếu đầu vào",
"missingFieldTemplate": "Thiếu vùng mẫu trình bày",
"collectionTooFewItems": "quá ít mục, ti thiểu là {{minItems}}",
"collectionTooManyItems": "quá nhiều mục, tối đa là {{maxItems}}",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} tài nguyên trống",
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: quá ít mục, tối thiểu {{minItems}}",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: quá nhiều mục, tối đa {{maxItems}}",
"canvasIsSelectingObject": "Canvas đang bận (đang chọn đồ vật)",
"fluxModelMultipleControlLoRAs": "Chỉ có thể dùng 1 LoRA Điều Khiển Được",
"collectionStringTooLong": "quá dài, tối đa là {{maxLength}}",
"collectionStringTooShort": "quá ngắn, tối thiểu là {{minLength}}",
"collectionNumberGTMax": "{{value}} > {{maximum}} (giá trị tối đa)",
"collectionNumberLTMin": "{{value}} < {{minimum}} (giá trị tối thiểu)",
"collectionNumberNotMultipleOf": "{{value}} không phải bội của {{multipleOf}}",
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (giá trị chọn lọc tối thiểu)",
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (giá trị chọn lọc tối đa)",
"batchNodeCollectionSizeMismatch": "Kích cỡ tài nguyên không phù hợp với Lô {{batchGroupId}}",
"emptyBatches": "lô trống",
"batchNodeNotConnected": "Node Hàng Loạt chưa được kết nối: {{label}}",
"batchNodeEmptyCollection": "Một vài node hàng loạt có tài nguyên rỗng",
"collectionEmpty": "tài nguyên trống",
"batchNodeCollectionSizeMismatchNoGroupId": "tài nguyên theo nhóm có kích thước sai lệch"
"fluxModelMultipleControlLoRAs": "Chỉ có thể dùng 1 LoRA Điều Khiển Được"
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1521,8 +1458,8 @@
"recallMetadata": "Gợi Lại Metadata",
"clipSkip": "CLIP Skip",
"general": "Cài Đặt Chung",
"boxBlur": "Làm Mờ Dạng Box",
"gaussianBlur": "Làm Mờ Dạng Gaussian",
"boxBlur": "Box Blur",
"gaussianBlur": "Gaussian Blur",
"staged": "Staged (Tăng khử nhiễu có hệ thống)",
"scaledHeight": "Tỉ Lệ Dài",
"cancel": {
@@ -1575,12 +1512,11 @@
"perPromptLabel": "Một Hạt Giống Mỗi Ảnh",
"perIterationLabel": "Hạt Giống Mỗi Lần Lặp Lại"
},
"loading": "Tạo Sinh Bằng Dynamic Prompt...",
"loading": "Tạo Sinh ng Dynamic Prompt...",
"showDynamicPrompts": "HIện Dynamic Prompt",
"maxPrompts": "Số Lệnh Tối Đa",
"promptsPreview": "Xem Trước Lệnh",
"dynamicPrompts": "Dynamic Prompt",
"promptsToGenerate": "Lệnh Để Tạo Sinh"
"dynamicPrompts": "Dynamic Prompt"
},
"settings": {
"beta": "Beta",
@@ -1604,14 +1540,14 @@
"clearIntermediates": "Dọn Sạch Sản Phẩm Trung Gian",
"clearIntermediatesDisabled": "Hàng đợi phải trống để dọn dẹp các sản phẩm trung gian",
"clearIntermediatesDesc1": "Dọn dẹp các sản phẩm trung gian sẽ làm mới trạng thái của Canvas và ControlNet.",
"clearIntermediatesDesc2": "Các sản phẩm ảnh trung gian là sản phẩm phụ trong quá trình tạo sinh, khác với ảnh trong thư viện ảnh. Xoá sản phẩm trung gian sẽ giúp làm trống ổ đĩa.",
"clearIntermediatesDesc2": "Các sản phẩm ảnh trung gian là sản phẩm phụ trong quá trình tạo sinh, khác với ảnh trong thư viện. Xoá sản phẩm trung gian sẽ giúp làm trống ổ đĩa.",
"resetWebUI": "Khởi Động Lại Giao Diện Web",
"showProgressInViewer": "Hiển Thị Hình Ảnh Đang Xử Lý Trong Trình Xem",
"ui": "Giao Diện Người Dùng",
"clearIntermediatesDesc3": "Ảnh trong thư viện ảnh sẽ không bị xoá.",
"clearIntermediatesDesc3": "Ảnh trong thư viện sẽ không bị xoá.",
"informationalPopoversDisabled": "Hộp Thoại Hỗ Trợ Thông Tin Đã Tắt",
"resetComplete": "Giao diện web đã được khởi động lại.",
"resetWebUIDesc2": "Nếu ảnh không được xuất hiện trong thư viện ảnh hoặc điều gì đó không ổn đang diễn ra, hãy thử khởi động lại trước khi báo lỗi trên Github.",
"resetWebUIDesc2": "Nếu ảnh không được xuất hiện trong thư viện hoặc điều gì đó không ổn đang diễn ra, hãy thử khởi động lại trước khi báo lỗi trên Github.",
"displayInProgress": "Hiển Thị Hình Ảnh Đang Xử Lý",
"intermediatesClearedFailed": "Có Vấn Đề Khi Dọn Sạch Sản Phẩm Trung Gian",
"enableInvisibleWatermark": "Bật Chế Độ Ẩn Watermark",
@@ -1639,7 +1575,7 @@
"width": "Chiều Rộng",
"negativePrompt": "Lệnh Tiêu Cực",
"removeBookmark": "Bỏ Đánh Dấu",
"saveBboxToGallery": "Lưu Hộp Giới Hạn Vào Thư Viện Ảnh",
"saveBboxToGallery": "Lưu Hộp Giới Hạn Vào Thư Viện",
"global": "Toàn Vùng",
"pullBboxIntoReferenceImageError": "Có Vấn Đề Khi Chuyển Hộp Giới Hạn Thành Ảnh Mẫu",
"clearHistory": "Xoá Lịch Sử",
@@ -1647,12 +1583,12 @@
"mergeVisibleOk": "Đã gộp layer",
"saveLayerToAssets": "Lưu Layer Vào Khu Tài Nguyên",
"canvas": "Canvas",
"savedToGalleryOk": "Đã Lưu Vào Thư Viện Ảnh",
"savedToGalleryOk": "Đã Lưu Vào Thư Viện",
"addGlobalReferenceImage": "Thêm $t(controlLayers.globalReferenceImage)",
"clipToBbox": "Chuyển Nét Thành Hộp Giới Hạn",
"moveToFront": "Chuyển Lên Trước",
"mergeVisible": "Gộp Layer Đang Hiển Thị",
"savedToGalleryError": "Lỗi khi lưu vào thư viện ảnh",
"savedToGalleryError": "Lỗi khi lưu vào thư viện",
"moveToBack": "Chuyển Về Sau",
"moveBackward": "Chuyển Xuống Cuối",
"newGlobalReferenceImageError": "Có Vấn Đề Khi Tạo Ảnh Mẫu Toàn Vùng",
@@ -1672,7 +1608,7 @@
"regional": "Khu Vực",
"regionIsEmpty": "Vùng được chọn trống",
"bookmark": "Đánh Dấu Để Đổi Nhanh",
"saveCanvasToGallery": "Lưu Canvas Vào Thư Viện Ảnh",
"saveCanvasToGallery": "Lưu Canvas Vào Thư Viện",
"cropLayerToBbox": "Xén Layer Vào Hộp Giới Hạn",
"mergeDown": "Gộp Xuống",
"mergeVisibleError": "Lỗi khi gộp layer",
@@ -1740,11 +1676,11 @@
"pullBboxIntoLayer": "Chuyển Hộp Giới Hạn Vào Layer",
"addInpaintMask": "Thêm $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Thêm $t(controlLayers.regionalGuidance)",
"sendToGallery": "Đã Chuyển Tới Thư Viện Ảnh",
"sendToGallery": "Chuyển Tới Thư Viện",
"unlocked": "Mở Khoá",
"addReferenceImage": "Thêm $t(controlLayers.referenceImage)",
"sendingToCanvas": "Chuyển Ảnh Tạo Sinh Vào Canvas",
"sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện Ảnh",
"sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện",
"viewProgressOnCanvas": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Canvas</Btn>.",
"inpaintMask_withCount_other": "Lớp Phủ Inpaint",
"regionalGuidance_withCount_other": "Chỉ Dẫn Khu Vực",
@@ -1755,7 +1691,7 @@
"copyRasterLayerTo": "Sao Chép $t(controlLayers.rasterLayer) Tới",
"copyControlLayerTo": "Sao Chép $t(controlLayers.controlLayer) Tới",
"newRegionalGuidance": "$t(controlLayers.regionalGuidance) Mới",
"newGallerySessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến thư viện ảnh.",
"newGallerySessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến thư viện.",
"stagingOnCanvas": "Hiển thị hình ảnh lên",
"pullBboxIntoReferenceImage": "Chuyển Hộp Giới Hạn Vào Ảnh Mẫu",
"maskFill": "Lấp Đầy Lớp Phủ",
@@ -1777,8 +1713,8 @@
"deleteReferenceImage": "Xoá Ảnh Mẫu",
"inpaintMasks_withCount_visible": "Lớp Phủ Inpaint ({{count}})",
"disableTransparencyEffect": "Tắt Hiệu Ứng Trong Suốt",
"newGallerySession": "Phiên Thư Viện Ảnh Mới",
"sendToGalleryDesc": "Bấm 'Kích Hoạt' sẽ tiến hành tạo sinh và lưu ảnh vào thư viện ảnh.",
"newGallerySession": "Phiên Thư Viện Mới",
"sendToGalleryDesc": "Bấm 'Kích Hoạt' sẽ tiến hành tạo sinh và lưu ảnh vào thư viện.",
"opacity": "Độ Mờ Đục",
"rectangle": "Hình Chữ Nhật",
"addNegativePrompt": "Thêm $t(controlLayers.negativePrompt)",
@@ -1813,24 +1749,21 @@
"process": "Xử Lý"
},
"canvasContextMenu": {
"saveBboxToGallery": "Lưu Hộp Giới Hạn Vào Thư Viện Ảnh",
"saveBboxToGallery": "Lưu Hộp Giới Hạn Vào Thư Viện",
"newGlobalReferenceImage": "Ảnh Mẫu Toàn Vùng Mới",
"cropCanvasToBbox": "Xén Canvas Vào Hộp Giới Hạn",
"newRegionalGuidance": "Chỉ Dẫn Khu Vực Mới",
"saveToGalleryGroup": "Lưu Vào Thư Viện Ảnh",
"saveToGalleryGroup": "Lưu Vào Thư Viện",
"newInpaintMask": "Lớp Phủ Inpaint Mới",
"saveCanvasToGallery": "Lưu Canvas Vào Thư Viện Ảnh",
"saveCanvasToGallery": "Lưu Canvas Vào Thư Viện",
"newRegionalReferenceImage": "Ảnh Mẫu Khu Vực Mới",
"newControlLayer": "Layer Điều Khiển Được Mới",
"newRasterLayer": "Layer Dạng Raster Mới",
"bboxGroup": "Được Tạo Từ Hộp Giới Hạn",
"canvasGroup": "Canvas",
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
"copyToClipboard": "Sao Chép Vào Clipboard",
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
"canvasGroup": "Canvas"
},
"stagingArea": {
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
"saveToGallery": "Lưu Vào Thư Viện",
"accept": "Chấp Nhận",
"discard": "Bỏ Đi",
"previous": "Trước",
@@ -1926,49 +1859,7 @@
},
"advanced": "Nâng Cao",
"processingLayerWith": "Đang xử lý layer với bộ lọc {{type}}.",
"forMoreControl": "Để kiểm soát tốt hơn, bấm vào mục Nâng Cao bên dưới.",
"img_blur": {
"description": "Làm mờ layer được chọn.",
"blur_type": "Dạng Làm Mờ",
"blur_radius": "Radius",
"gaussian_type": "Gaussian",
"label": "Làm Mờ Ảnh",
"box_type": "Box"
},
"img_noise": {
"salt_and_pepper_type": "Salt and Pepper",
"noise_amount": "Lượng Nhiễu",
"label": "Độ Nhiễu Ảnh",
"description": "Tăng độ nhiễu vào layer được chọn.",
"noise_type": "Dạng Nhiễu",
"gaussian_type": "Gaussian",
"noise_color": "Màu Nhiễu",
"size": "Cỡ Nhiễu"
},
"adjust_image": {
"channel": "Kênh Màu",
"cyan": "Lục Lam (Cmyk)",
"value_setting": "Giá Trị",
"scale_values": "Giá Trị Theo Tỉ Lệ",
"red": "Đỏ (Rgba)",
"green": "Lục (rGba)",
"blue": "Lam (rgBa)",
"alpha": "Độ Trong Suốt (rgbA)",
"luminosity": "Độ Sáng (Lab)",
"magenta": "Hồng Đỏ (cMyk)",
"yellow": "Vàng (cmYk)",
"description": "Điều chỉnh kênh màu được chọn của ảnh.",
"black": "Đen (cmyK)",
"cr": "Cr (ycC)",
"label": "Điều Chỉnh Ảnh",
"value": "Độ Sáng (hsV)",
"saturation": "Độ Bão Hoà (hSv)",
"hue": "Vùng Màu (Hsv)",
"a": "A (lAb)",
"b": "B (laB)",
"y": "Y (Ycc)",
"cb": "Cb (yCc)"
}
"forMoreControl": "Để kiểm soát tốt hơn, bấm vào mục Nâng Cao bên dưới."
},
"transform": {
"fitModeCover": "Che Phủ",
@@ -2046,20 +1937,6 @@
"rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
"rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
"rgNoRegion": "không có khu vực được vẽ"
},
"pasteTo": "Dán Vào",
"pasteToAssets": "Tài Nguyên",
"pasteToAssetsDesc": "Dán Vào Tài Nguyên",
"pasteToBbox": "Hộp Giới Hạn",
"pasteToBboxDesc": "Layer Mới (Trong Hộp Giới Hạn)",
"pasteToCanvas": "Canvas",
"pasteToCanvasDesc": "Layer Mới (Trong Canvas)",
"pastedTo": "Dán Vào {{destination}}",
"regionCopiedToClipboard": "Sao Chép {{region}} Vào Clipboard",
"copyRegionError": "Lỗi khi sao chép {{region}}",
"errors": {
"unableToLoadImage": "Không Thể Tải Hình Ảnh",
"unableToFindImage": "Không Thể Tìm Hình Ảnh"
}
},
"stylePresets": {
@@ -2112,7 +1989,7 @@
"enableLogging": "Bật Chế Độ Ghi Log",
"logNamespaces": {
"models": "Models",
"gallery": "Thư Viện Ảnh",
"gallery": "Thư Viện",
"config": "Cấu Hình",
"queue": "Queue",
"workflows": "Workflow",
@@ -2167,7 +2044,7 @@
"parameterSetDesc": "Gợi lại {{parameter}}",
"loadedWithWarnings": "Đã Tải Workflow Với Cảnh Báo",
"outOfMemoryErrorDesc": "Thiết lập tạo sinh hiện tại đã vượt mức cho phép của thiết bị. Hãy điều chỉnh thiết lập và thử lại.",
"setNodeField": "Đặt làm vùng node",
"setNodeField": "Đặt làm vùng cho node",
"problemRetrievingWorkflow": "Có Vấn Đề Khi Lấy Lại Workflow",
"somethingWentWrong": "Có Vấn Đề Phát Sinh",
"problemDeletingWorkflow": "Có Vấn Đề Khi Xoá Workflow",
@@ -2190,17 +2067,11 @@
"problemCopyingImage": "Không Thể Sao Chép Ảnh",
"problemDownloadingImage": "Không Thể Tải Xuống Ảnh",
"problemCopyingLayer": "Không Thể Sao Chép Layer",
"problemSavingLayer": "Không Thể Lưu Layer",
"outOfMemoryErrorDescLocal": "Làm theo <LinkComponent>hướng dẫn VRAM Thấp</LinkComponent> của chúng tôi để hạn chế OOM (Tràn bộ nhớ).",
"unableToCopy": "Không Thể Sao Chép",
"unableToCopyDesc_theseSteps": "các bước sau",
"unableToCopyDesc": "Trình duyệt của bạn không hỗ trợ tính năng clipboard. Người dùng Firefox có thể khắc phục theo ",
"pasteSuccess": "Dán Vào {{destination}}",
"pasteFailed": "Dán Thất Bại"
"problemSavingLayer": "Không Thể Lưu Layer"
},
"ui": {
"tabs": {
"gallery": "Thư Viện Ảnh",
"gallery": "Thư Viện",
"models": "Models",
"generation": "Generation (Máy Tạo Sinh)",
"upscaling": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)",
@@ -2232,7 +2103,7 @@
"savingWorkflow": "Đang Lưu Workflow...",
"ascending": "Tăng Dần",
"loading": "Đang Tải Workflow",
"chooseWorkflowFromLibrary": "Chọn Workflow Từ Thư Viện",
"chooseWorkflowFromLibrary": "Chọn Workflow Từ Túi Đồ",
"workflows": "Workflow",
"copyShareLinkForWorkflow": "Sao Chép Liên Kết Chia Sẻ Cho Workflow",
"openWorkflow": "Mở Workflow",
@@ -2252,42 +2123,11 @@
"convertGraph": "Chuyển Đổi Đồ Thị",
"saveWorkflowToProject": "Lưu Workflow Vào Dự Án",
"workflowName": "Tên Workflow",
"workflowLibrary": "Thư Viện",
"workflowLibrary": "Túi Đồ",
"opened": "Ngày Mở",
"deleteWorkflow": "Xoá Workflow",
"workflowEditorMenu": "Menu Biên Tập Workflow",
"uploadAndSaveWorkflow": "Tải Lên Thư Viện",
"openLibrary": "Mở Thư Viện",
"builder": {
"resetAllNodeFields": "Tải Lại Các Vùng Node",
"builder": "Trình Tạo Vùng Nhập",
"layout": "Bố Cục",
"row": "Hàng",
"zoomToNode": "Phóng To Vào Node",
"addToForm": "Thêm Vào Vùng Nhập",
"label": "Nhãn Tên",
"showDescription": "Hiện Dòng Mô Tả",
"component": "Thành Phần",
"numberInput": "Nhập Số",
"singleLine": "Một Dòng",
"multiLine": "Nhiều Dòng",
"slider": "Thanh Trượt",
"both": "Cả Hai",
"emptyRootPlaceholderViewMode": "Chọn Chỉnh Sửa để bắt đầu tạo nên một vùng nhập cho workflow này.",
"emptyRootPlaceholderEditMode": "Kéo thành phần vùng nhập hoặc vùng node vào đây để bắt đầu.",
"containerPlaceholder": "Hộp Chứa Trống",
"headingPlaceholder": "Đầu Dòng Trống",
"textPlaceholder": "Văn Bản Trống",
"column": "Cột",
"deleteAllElements": "Xóa Tất Cả Thành Phần",
"nodeField": "Vùng Node",
"nodeFieldTooltip": "Để thêm vùng node, bấm vào dấu cộng nhỏ trên vùng trong Trình Biên Tập Workflow, hoặc kéo vùng theo tên của nó vào vùng nhập.",
"workflowBuilderAlphaWarning": "Trình tạo vùng nhập đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành.",
"container": "Hộp Chứa",
"heading": "Đầu Dòng",
"text": "Văn Bản",
"divider": "Gạch Chia"
}
"workflowEditorMenu": "Menu Biên Tập Viên Workflow",
"uploadAndSaveWorkflow": "Tải Lên Túi Đồ"
},
"upscaling": {
"missingUpscaleInitialImage": "Thiếu ảnh dùng để upscale",
@@ -2310,11 +2150,12 @@
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
},
"newUserExperience": {
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
"gettingStartedSeries": "Cần thêm hướng dẫn? Xem thử <LinkComponent>Bắt Đầu Làm Quen</LinkComponent> để biết thêm mẹo khai thác toàn bộ tiềm năng của Invoke Studio.",
"toGetStarted": "Để bắt đầu, hãy nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
"noModelsInstalled": "Dường như bạn chưa tải model nào cả! Bạn có thể <DownloadStarterModelsButton>tải xuống các model khởi đầu</DownloadStarterModelsButton> hoặc <ImportModelsButton>nhập vào thêm model</ImportModelsButton>.",
"lowVRAMMode": "Cho hiệu suất tốt nhất, hãy làm theo <LinkComponent>hướng dẫn VRAM Thấp</LinkComponent> của chúng tôi."
"toGetStarted": "Để bắt đầu, hãy nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
"downloadStarterModels": "Tải Xuống Model Khởi Đầu",
"importModels": "Nhập Vào Model",
"noModelsInstalled": "Hình như bạn không có model nào được tải cả"
},
"whatsNew": {
"whatsNewInInvoke": "Có Gì Mới Ở Invoke",
@@ -2322,8 +2163,7 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Trình Biên Tập Workflow: trình tạo vùng nhập dưới dạng kéo thả nhằm tạo dựng workflow dễ dàng hơn.",
"Các nâng cấp khác: Xếp hàng tạo sinh theo nhóm nhanh hơn, upscale tốt hơn, trình chọn màu được cải thiện, và node chứa metadata."
"<StrongComponent>Hướng Dẫn Khu Vực FLUX (beta)</StrongComponent>: Bản beta của Hướng Dẫn Khu Vực FLUX của chúng ta đã có mắt tại bảng điều khiển lệnh khu vực."
]
},
"upsell": {
@@ -2355,8 +2195,8 @@
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện?",
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện."
},
"howDoIEditOnTheCanvas": {
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
@@ -2393,10 +2233,5 @@
},
"controlCanvas": "Điều Khiển Canvas",
"watch": "Xem"
},
"modelCache": {
"clearSucceeded": "Cache Model Đã Được Dọn",
"clearFailed": "Có Vấn Đề Khi Dọn Cache Model",
"clear": "Dọn Cache Model"
}
}
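
The vi.json strings above lean on three i18next conventions that explain their odd-looking values: {{count}} placeholders are interpolation, the _other suffix is the plural form i18next selects for Vietnamese (a single CLDR plural category), and $t(...) embeds one key inside another. The <StrongComponent>/<LinkComponent> tags are placeholders that react-i18next's Trans component swaps for real elements. A minimal sketch of how two keys from this diff resolve, assuming an initialized i18next instance with this bundle loaded, and assuming the base controlLayers.regionalGuidance key (only its _withCount_other variant appears above) resolves to "Chỉ Dẫn Khu Vực":

import i18next from 'i18next';

// Assumes i18next.init({ lng: 'vi', resources: { vi: { translation: viBundle } } })
// has already run with the bundle shown in the diff above.

// "generatorNRandomValues_other": "{{count}} giá trị ngẫu nhiên"
// Vietnamese has only the "other" plural category, so _other is always picked:
i18next.t('nodes.generatorNRandomValues', { count: 5 });
// -> "5 giá trị ngẫu nhiên"

// "newRegionalGuidance": "$t(controlLayers.regionalGuidance) Mới"
// $t(...) nesting is resolved recursively at lookup time:
i18next.t('controlLayers.newRegionalGuidance');
// -> "Chỉ Dẫn Khu Vực Mới" (given the assumed base key)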

View File

@@ -107,6 +107,7 @@
"noImagesInGallery": "无图像可用于显示",
"deleteImage_other": "删除{{count}}张图片",
"deleteImagePermanent": "删除的图片无法被恢复。",
"assets": "素材",
"autoAssignBoardOnClick": "点击后自动分配面板",
"featuresWillReset": "如果您删除该图像,这些功能会立即被重置。",
"loading": "加载中",

View File

@@ -1,25 +1,21 @@
import { Box, useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $didStudioInit, useStudioInitAction } from 'app/hooks/useStudioInitAction';
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
import { useLogger } from 'app/logging/useLogger';
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import { useFocusRegionWatcher } from 'common/hooks/focus';
import { useClearStorage } from 'common/hooks/useClearStorage';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import {
NewCanvasSessionDialog,
NewGallerySessionDialog,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
@@ -27,9 +23,7 @@ import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModa
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/WorkflowListMenu/ShareWorkflowModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { useReadinessWatcher } from 'features/queue/store/readiness';
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
@@ -38,7 +32,6 @@ import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
import { DeleteWorkflowDialog } from 'features/workflowLibrary/components/DeleteLibraryWorkflowConfirmationAlertDialog';
import { LoadWorkflowConfirmationAlertDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { NewWorkflowConfirmationAlertDialog } from 'features/workflowLibrary/components/NewWorkflowConfirmationAlertDialog';
import i18n from 'i18n';
import { size } from 'lodash-es';
@@ -57,30 +50,55 @@ interface Props {
}
const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
const didStudioInit = useStore($didStudioInit);
const language = useAppSelector(selectLanguage);
const logger = useLogger('system');
const dispatch = useAppDispatch();
const clearStorage = useClearStorage();
// singleton!
useSocketIO();
useGlobalModifiersInit();
useGlobalHotkeys();
useGetOpenAPISchemaQuery();
useSyncLoggingConfig();
const handleReset = useCallback(() => {
clearStorage();
location.reload();
return false;
}, [clearStorage]);
useEffect(() => {
i18n.changeLanguage(language);
}, [language]);
useEffect(() => {
if (size(config)) {
logger.info({ config }, 'Received config');
dispatch(configChanged(config));
}
}, [dispatch, config, logger]);
useEffect(() => {
dispatch(appStarted());
}, [dispatch]);
useStudioInitAction(studioInitAction);
useStarterModelsToast();
useSyncQueueStatus();
useFocusRegionWatcher();
return (
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
<AppContent />
{!didStudioInit && <Loading />}
</Box>
<HookIsolator config={config} studioInitAction={studioInitAction} />
<DeleteImageModal />
<ChangeBoardModal />
<DynamicPromptsModal />
<StylePresetModal />
<CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
<ClearQueueConfirmationsAlertDialog />
<NewWorkflowConfirmationAlertDialog />
<LoadWorkflowConfirmationAlertDialog />
<DeleteStylePresetDialog />
<DeleteWorkflowDialog />
<ShareWorkflowModal />
@@ -92,51 +110,8 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
<ImageContextMenu />
<FullscreenDropzone />
<VideosModal />
<CanvasManagerProviderGate>
<CanvasPasteModal />
</CanvasManagerProviderGate>
</ErrorBoundary>
);
};
export default memo(App);
// Running these hooks in a separate component ensures we do not inadvertently rerender the entire app when they change.
const HookIsolator = memo(
({ config, studioInitAction }: { config: PartialAppConfig; studioInitAction?: StudioInitAction }) => {
const language = useAppSelector(selectLanguage);
const logger = useLogger('system');
const dispatch = useAppDispatch();
// singleton!
useReadinessWatcher();
useSocketIO();
useGlobalModifiersInit();
useGlobalHotkeys();
useGetOpenAPISchemaQuery();
useSyncLoggingConfig();
useEffect(() => {
i18n.changeLanguage(language);
}, [language]);
useEffect(() => {
if (size(config)) {
logger.info({ config }, 'Received config');
dispatch(configChanged(config));
}
}, [dispatch, config, logger]);
useEffect(() => {
dispatch(appStarted());
}, [dispatch]);
useStudioInitAction(studioInitAction);
useStarterModelsToast();
useSyncQueueStatus();
useFocusRegionWatcher();
return null;
}
);
HookIsolator.displayName = 'HookIsolator';
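
The App.tsx refactor above is a render-isolation pattern: every store-subscribing or effectful hook moves into HookIsolator, a memoized child that renders null, so the re-renders those hooks trigger stay inside the child instead of cascading through the whole app tree. A minimal sketch of the same pattern with hypothetical names (TitleSyncer and the s.ui.title selector are illustrative, not from this diff):

import { memo, useEffect } from 'react';
import { useAppSelector } from 'app/store/storeHooks';

// A null-rendering child owns the store subscription. When the selected
// state changes, only TitleSyncer re-renders; the component that mounts
// <TitleSyncer /> is untouched.
const TitleSyncer = memo(() => {
  const title = useAppSelector((s) => s.ui.title); // hypothetical selector
  useEffect(() => {
    document.title = title;
  }, [title]);
  return null;
});
TitleSyncer.displayName = 'TitleSyncer';

memo matters in the other direction too: when the parent re-renders for unrelated reasons, the child skips re-rendering as long as its props are shallow-equal, which is presumably why HookIsolator receives config and studioInitAction as plain props.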

View File

@@ -1,7 +1,6 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useClipboard } from 'common/hooks/useClipboard';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
@@ -21,17 +20,15 @@ const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLoc
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
const clipboard = useClipboard();
const handleCopy = useCallback(() => {
const text = JSON.stringify(serializeError(error), null, 2);
clipboard.writeText(`\`\`\`\n${text}\n\`\`\``, () => {
toast({
id: 'ERROR_COPIED',
title: t('toast.errorCopied'),
});
navigator.clipboard.writeText(`\`\`\`\n${text}\n\`\`\``);
toast({
id: 'ERROR_COPIED',
title: t('toast.errorCopied'),
});
}, [clipboard, error, t]);
}, [error, t]);
const url = useMemo(() => {
if (isLocal) {

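One behavioral nuance in the simplified handler above: the removed useClipboard call ran the toast from a callback, while navigator.clipboard.writeText returns a promise that the new code ignores, so the toast now fires whether or not the asynchronous write succeeds. A promise-aware variant would be a small change; this sketch is illustrative, not part of the diff:

const handleCopy = useCallback(() => {
  const text = JSON.stringify(serializeError(error), null, 2);
  // Only confirm once the async clipboard write has resolved.
  navigator.clipboard.writeText(`\`\`\`\n${text}\n\`\`\``).then(() => {
    toast({ id: 'ERROR_COPIED', title: t('toast.errorCopied') });
  });
}, [error, t]);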
Some files were not shown because too many files have changed in this diff.