Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-17 08:57:57 -05:00

Compare commits: v5.7.2...psyche/per (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ae8459f221 | |
| | b73f15d4a2 | |
| | 5e1974d924 | |
| | 3011dfca16 | |
.github/workflows/build-container.yml (vendored, 11 lines changed)
@@ -76,6 +76,9 @@ jobs:

          latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
          suffix=-${{ matrix.gpu-driver }},onlatest=false

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:

@@ -100,7 +103,7 @@ jobs:

          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # cache-from: |
          #   type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
          #   type=gha,scope=main-${{ matrix.gpu-driver }}
          # cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
          cache-from: |
            type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
            type=gha,scope=main-${{ matrix.gpu-driver }}
          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
docker/Dockerfile

@@ -13,63 +13,48 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

    git

# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/

ENV VIRTUAL_ENV=/opt/venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV INVOKEAI_SRC=/opt/invokeai
ENV PYTHON_VERSION=3.11
ENV UV_PYTHON=3.11
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
ENV UV_INDEX="https://download.pytorch.org/whl/cu124"

ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM

# Switch to the `ubuntu` user to work around dependency issues with uv-installed python
RUN mkdir -p ${VIRTUAL_ENV} && \
    mkdir -p ${INVOKEAI_SRC} && \
    chmod -R a+w /opt && \
    mkdir ~ubuntu/.cache && chown ubuntu: ~ubuntu/.cache
    chmod -R a+w /opt
USER ubuntu

# Install python
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
    uv python install ${PYTHON_VERSION}
# Install python and create the venv
RUN uv python install ${PYTHON_VERSION} && \
    uv venv --relocatable --prompt "invoke" --python ${PYTHON_VERSION} ${VIRTUAL_ENV}

WORKDIR ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./

# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=invokeai/version,target=invokeai/version \
    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
        UV_INDEX="https://download.pytorch.org/whl/cpu"; \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
    elif [ "$GPU_DRIVER" = "rocm" ]; then \
        UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
    else \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
    fi && \
    uv sync --no-install-project

# Now that the bulk of the dependencies have been installed, copy in the project files that change more frequently.
COPY invokeai invokeai
COPY pyproject.toml .

RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
        UV_INDEX="https://download.pytorch.org/whl/cpu"; \
    elif [ "$GPU_DRIVER" = "rocm" ]; then \
        UV_INDEX="https://download.pytorch.org/whl/rocm6.1"; \
    fi && \
    uv sync

    uv pip install --python ${PYTHON_VERSION} $extra_index_url_arg -e "."

#### Build the Web UI ------------------------------------

@@ -113,7 +98,6 @@ RUN apt update && apt install -y --no-install-recommends \

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv
ENV UV_PROJECT_ENVIRONMENT="$VIRTUAL_ENV"
ENV PYTHON_VERSION=3.11
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0

@@ -125,7 +109,7 @@ ENV CONTAINER_GID=${CONTAINER_GID:-1000}

# Install `uv` for package management
# and install python for the ubuntu user (expected to exist on ubuntu >=24.x)
# this is too tiny to optimize with multi-stage builds, but maybe we'll come back to it
COPY --from=ghcr.io/astral-sh/uv:0.6.0 /uv /uvx /bin/
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/
USER ubuntu
RUN uv python install ${PYTHON_VERSION}
USER root
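For context on the `GPU_DRIVER` / `TARGETPLATFORM` build args used above, a build might be invoked like this (a sketch; the repo's docker-compose setup may wrap this differently):

```sh
# CUDA (the default) on x86_64
docker build -t invokeai:cuda --build-arg GPU_DRIVER=cuda .

# ROCm: pulls torch wheels from the rocm6.1 index
docker build -t invokeai:rocm --build-arg GPU_DRIVER=rocm .

# arm64 must be CPU-only - there are no arm64+CUDA torch wheels
docker buildx build --platform linux/arm64 --build-arg GPU_DRIVER=cpu -t invokeai:cpu .
```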
docs/RELEASE.md (138 lines changed)
@@ -1,50 +1,41 @@

# Release Process

The Invoke application is published as a python package on [PyPI]. This includes both a source distribution and built distribution (a wheel).

The app is published twice, in different build formats.

Most users install it with the [Launcher](https://github.com/invoke-ai/launcher/), others with `pip`.

The launcher uses GitHub as the source of truth for available releases.

## Broad Strokes

- Merge all changes and bump the version in the codebase.
- Tag the release commit.
- Wait for the release workflow to complete.
- Approve the PyPI publish jobs.
- Write GH release notes.

- A [PyPI] distribution. This includes both a source distribution and built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.

## General Prep

Make a developer call-out for PRs to merge. Merge and test things out. Bump the version by editing `invokeai/version/invokeai_version.py`.

Make a developer call-out for PRs to merge. Merge and test things out.

While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.

## Release Workflow

The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.

It is triggered on **tag push**, when the tag matches `v*`.

It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.

> Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch. So long as the tag exists, that commit will exist.

### Triggering the Workflow

Ensure all commits that should be in the release are merged, and you have pulled them locally.

Run `make tag-release` to tag the current commit and kick off the workflow.

Double-check that you have checked out the commit that will represent the release (typically the latest commit on `main`).

Run `make tag-release` to tag the current commit and kick off the workflow. You will be prompted to provide a message - use the version specifier.

If this version's tag already exists for some reason (maybe you had to make a last-minute change), the script will overwrite it.
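For reference, the tagging step boils down to a forced annotated tag plus a push - roughly this sketch (check the `Makefile` for the actual `tag-release` target):

```sh
# Illustrative only. -f overwrites an existing tag for this version;
# pushing the tag is what triggers release.yml.
git tag -fa v5.7.2 -m "v5.7.2"
git push -f origin v5.7.2
```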
> In case you cannot use the Make target, the release may also be dispatched [manually] via GH.

The release may also be dispatched [manually].

### Workflow Jobs and Process

The workflow consists of a number of concurrently-run checks and tests, then two final publish jobs.

The workflow consists of a number of concurrently-run jobs, and two final publish jobs.

The publish jobs require manual approval and are only run if the other jobs succeed.

#### `check-version` Job

This job ensures that the `invokeai` python package version specifier matches the tag for the release. The version specifier is pulled from the `__version__` variable in `invokeai/version/invokeai_version.py`.

This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`.

When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown.

This job uses [samuelcolvin/check-python-version].
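Conceptually, the check reduces to comparing the ref (minus its `v` prefix) against `__version__` - something like this sketch, not the action's actual implementation:

```sh
ref="${GITHUB_REF_NAME#v}"  # e.g. "v5.7.2" -> "5.7.2"
version="$(python -c 'from invokeai.version.invokeai_version import __version__; print(__version__)')"
test "$ref" = "$version" || { echo "ref $ref does not match version $version"; exit 1; }
```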
@@ -52,52 +43,62 @@ This job uses [samuelcolvin/check-python-version].

#### Check and Test Jobs

Next, these jobs run and must pass. They are the same jobs that are run for every PR.

- **`python-tests`**: runs `pytest` on a matrix of platforms
- **`python-checks`**: runs `ruff` (format and lint)
- **`frontend-tests`**: runs `vitest`
- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
- **`typegen-checks`**: ensures the frontend and backend types are synced

> **TODO** We should add `mypy` or `pyright` to the **`check-python`** job.

> **TODO** We should add an end-to-end test job that generates an image.

#### `build-installer` Job

This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:

- **`dist`**: the python distribution, to be published on PyPI
- **`InvokeAI-installer-${VERSION}.zip`**: the legacy install scripts

You don't need to download either of these files.

> The legacy install scripts are no longer used, but we haven't updated the workflow to skip building them.

- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release

#### Sanity Check & Smoke Test

At this point, the release workflow pauses as the remaining publish jobs require approval.

At this point, the release workflow pauses as the remaining publish jobs require approval. Time to test the installer.

It's possible to test the python package before it gets published to PyPI. We've never had problems with it, so it's not necessary to do this.

Because the installer pulls from PyPI, and we haven't published to PyPI yet, you will need to install from the wheel:

But, if you want to be extra-super careful, here's how to test it:

- Download and unzip `dist.zip` and the installer from the **Summary** tab of the workflow
- Run the installer script using the `--wheel` CLI arg, pointing at the wheel:

- Download the `dist.zip` build artifact from the `build-installer` job
- Unzip it and find the wheel file
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/) - but instead of installing from PyPI, install from the wheel
- Test the app

```sh
./install.sh --wheel ../InvokeAI-4.0.0rc6-py3-none-any.whl
```

- Install to a temporary directory so you get the new user experience
- Download a model and generate

> The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with exactly the same installation as if the installer got the wheel from PyPI.
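Concretely, a wheel-based smoke test might look like this (a sketch following the manual install guide; the wheel filename is illustrative):

```sh
# In a throwaway directory, so you also see the new-user experience
python -m venv .venv --prompt invoke
source .venv/bin/activate
pip install ./InvokeAI-4.0.0rc6-py3-none-any.whl
invokeai-web
```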
##### Something isn't right

If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?) and start over.

If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?).

Now you can start from the top:

- Fix the issues and PR the fixes per usual
- Get the PR approved and merged per usual
- Switch to `main` and pull in the fixes
- Run `make tag-release` to move the tag to `HEAD` (which has the fixes) and kick off the release workflow again
- Re-do the sanity check

#### PyPI Publish Jobs

The publish jobs will not run if any of the previous jobs fail.

They use [GitHub environments], which are configured as [trusted publishers] on PyPI.

Both jobs require @hipsterusername or @psychedelicious to approve them from the workflow's **Summary** tab.

Both jobs require a maintainer to approve them from the workflow's **Summary** tab.

- Click the **Review deployments** button
- Select the environment (either `testpypi` or `pypi` - typically you select both)
- Select the environment (either `testpypi` or `pypi`)
- Click **Approve and deploy**

> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release.

@@ -112,33 +113,46 @@ If there are no incidents, contact @hipsterusername or @lstein, who have owner a

Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.

This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release for some reason:

This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release.

- Approve this publish job without approving the prod publish
- Let it finish
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/), making sure to use the Test PyPI index URL: `https://test.pypi.org/simple/`
- Test the app

If approved and successful, you could try out the test release like this:

```sh
# Create a new virtual environment and activate it
python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
source ~/.test-invokeai-dist/bin/activate
# Install the distribution from Test PyPI
pip install --index-url https://test.pypi.org/simple/ invokeai
# Run and test the app
invokeai-web
# Cleanup
deactivate
rm -rf ~/.test-invokeai-dist
```
#### `publish-pypi` Job

Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.

It's a good idea to wait to approve and run this job until you have the release notes ready!

## Publish the GitHub Release with installer

## Prep and publish the GitHub Release

Once the release is published to PyPI, it's time to publish the GitHub release.

1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
2. The **Generate release notes** button automatically inserts the changelog and new contributors. Make sure to select the correct tags for this release and the last stable release. GH often selects the wrong tags - do this manually.
3. Write the release notes, describing important changes. Contributions from community members should be shouted out. Use the GH-generated changelog to see all contributors. If there are Weblate translation updates, open that PR and shout out every person who contributed a translation.
4. Check **Set as a pre-release** if it's a pre-release.
5. Approve and wait for the `publish-pypi` job to finish if you haven't already.
6. Publish the GH release.
7. Post the release in Discord in the [releases](https://discord.com/channels/1020123559063990373/1149260708098359327) channel with abbreviated notes. For example:
   > Invoke v5.7.0 (stable): <https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0>
   >
   > It's a pretty big one - Form Builder, Metadata Nodes (thanks @SkunkWorxDark!), and much more.
8. Right-click the message in releases and copy the link to it. Then, post that link in the [new-release-discussion](https://discord.com/channels/1020123559063990373/1149506274971631688) channel. For example:
   > Invoke v5.7.0 (stable): <https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248>

1. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
1. Use `scripts/get_external_contributions.py` to get a list of external contributions to shout out in the release notes.
1. Upload the zip file created in the **`build`** job into the Assets section of the release notes.
1. Check **Set as a pre-release** if it's a pre-release.
1. Check **Create a discussion for this release**.
1. Publish the release.
1. Announce the release in Discord.

> **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.

## Manual Build

The `build installer` workflow can be dispatched manually. This is useful to test the installer for a given branch or tag.

No checks are run; it just builds.

## Manual Release

@@ -146,10 +160,12 @@ The `release` workflow can be dispatched manually. You must dispatch the workflo

This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.

[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
[PyPI]: https://pypi.org/
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
[Test PyPI]: https://test.pypi.org/
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
[ncipollo/release-action]: https://github.com/ncipollo/release-action
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
@@ -31,7 +31,6 @@ It is possible to fine-tune the settings for best performance or if you still ge

Low-VRAM mode involves 4 features, each of which can be configured or fine-tuned:

- Partial model loading (`enable_partial_loading`)
- PyTorch CUDA allocator config (`pytorch_cuda_alloc_conf`)
- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
- Working memory (`device_working_mem_gb`)
- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)

@@ -52,16 +51,6 @@ As described above, you can enable partial model loading by adding this line to

```yaml
enable_partial_loading: true
```

### PyTorch CUDA allocator config

The PyTorch CUDA allocator's behavior can be configured using the `pytorch_cuda_alloc_conf` config. Tuning the allocator configuration can help to reduce the peak reserved VRAM. The optimal configuration is dependent on many factors (e.g. device type, VRAM, CUDA driver version, etc.), but switching from PyTorch's native allocator to using CUDA's built-in allocator works well on many systems. To try this, add the following line to your `invokeai.yaml` file:

```yaml
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
```

A more complete explanation of the available configuration options is [here](https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf).
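The same setting can also be applied via PyTorch's standard environment variable, which is handy for a one-off experiment (shown for comparison, not as a replacement for `invokeai.yaml`; it must be set before the app starts):

```sh
PYTORCH_CUDA_ALLOC_CONF="backend:cudaMallocAsync" invokeai-web
```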
### Dynamic RAM and VRAM cache sizes

Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum.

@@ -86,26 +75,24 @@ But, if your GPU has enough VRAM to hold models fully, you might get a perf boos

```yaml
# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB
# might be a good value to achieve aggressive model caching.
max_cache_ram_gb: 28

# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
# consideration the VRAM used by other processes).
# You can override the default value by setting `max_cache_vram_gb`.
# CAUTION: Most users should not manually set this value. See warning below.
max_cache_vram_gb: 16

# You can override the default value by setting `max_cache_vram_gb`. Note that this value takes precedence over the
# `device_working_mem_gb`.
# It is recommended to set the VRAM cache size to be as large as possible while leaving enough room for the working
# memory of the tasks you will be doing. For example, on a 24GB GPU that will be running unquantized FLUX without any
# auxiliary models, 18GB might be a good value.
max_cache_vram_gb: 18
```

!!! warning "Max safe value for `max_cache_vram_gb`"

!!! tip "Max safe value for `max_cache_vram_gb`"

    Most users should not manually configure the `max_cache_vram_gb`. This configuration value takes precedence over the `device_working_mem_gb` and any operations that explicitly reserve additional working memory (e.g. VAE decode). As such, manually configuring it increases the likelihood of encountering out-of-memory errors.

    For users who wish to configure `max_cache_vram_gb`, the max safe value can be determined by subtracting `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.

    To determine the max safe value for `max_cache_vram_gb`, subtract `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.

    For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.

    If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.

    Most users who override `max_cache_vram_gb` are doing so because they wish to use significantly less VRAM, and should be setting `max_cache_vram_gb` to a value significantly less than the 'max safe value'.
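Putting the arithmetic together, a conservative override for a 12GB GPU might look like this in `invokeai.yaml` (illustrative values, not recommendations):

```yaml
enable_partial_loading: true
# 12GB VRAM - 3GB default working memory = 9GB max safe value;
# staying below it leaves extra headroom.
max_cache_vram_gb: 8
```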
### Working memory

Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.
@@ -1,4 +1,4 @@

from typing import Optional, Union
from typing import Literal, Optional, Union

from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter

@@ -146,7 +146,7 @@ async def list_boards(

    response_model=list[str],
)
async def list_all_board_image_names(
    board_id: str = Path(description="The id of the board"),
    board_id: str | Literal["none"] = Path(description="The id of the board"),
    categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
    is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
) -> list[str]:
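With this change, the literal board id `none` selects images that belong to no board. Assuming the usual route prefix and default port, a request might look like this (illustrative):

```sh
# Names of images on no board, excluding intermediates
curl "http://localhost:9090/api/v1/boards/none/image_names?is_intermediate=false"
```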
@@ -48,9 +48,7 @@ async def enqueue_batch(

) -> EnqueueBatchResult:
    """Processes a batch and enqueues the output graphs for execution."""

    return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
        queue_id=queue_id, batch=batch, prepend=prepend
    )
    return ApiDependencies.invoker.services.session_queue.enqueue_batch(queue_id=queue_id, batch=batch, prepend=prepend)


@session_queue_router.get(
@@ -1,8 +1,12 @@

import asyncio
import logging
import mimetypes
import socket
from contextlib import asynccontextmanager
from pathlib import Path

import torch
import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware

@@ -11,7 +15,11 @@ from fastapi.responses import HTMLResponse, RedirectResponse

from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from torch.backends.mps import is_available as is_mps_available

# for PyCharm:
# noinspection PyUnresolvedReferences
import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)
import invokeai.frontend.web as web_dir
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles

@@ -30,13 +38,31 @@ from invokeai.app.api.routers (

from invokeai.app.api.sockets import SocketIO
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.custom_openapi import get_openapi_func
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger

app_config = get_config()


if is_mps_available():
    import invokeai.backend.util.mps_fixes  # noqa: F401 (monkeypatching on import)


logger = InvokeAILogger.get_logger(config=app_config)
# fix for windows mimetypes registry entries being borked
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")

torch_device_name = TorchDevice.get_torch_device_name()
logger.info(f"Using torch device: {torch_device_name}")

loop = asyncio.new_event_loop()

# We may change the port if the default is in use, this global variable is used to store the port so that we can log
# the correct port when the server starts in the lifespan handler.
port = app_config.port


@asynccontextmanager
async def lifespan(app: FastAPI):

@@ -45,7 +71,7 @@ async def lifespan(app: FastAPI):

    # Log the server address when it starts - in case the network log level is not high enough to see the startup log
    proto = "https" if app_config.ssl_certfile else "http"
    msg = f"Invoke running on {proto}://{app_config.host}:{app_config.port} (Press CTRL+C to quit)"
    msg = f"Invoke running on {proto}://{app_config.host}:{port} (Press CTRL+C to quit)"

    # Logging this way ignores the logger's log level and _always_ logs the message
    record = logger.makeRecord(

@@ -160,3 +186,73 @@ except RuntimeError:

app.mount(
    "/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
)  # docs favicon is in here


def check_cudnn(logger: logging.Logger) -> None:
    """Check for cuDNN issues that could be causing degraded performance."""
    if torch.backends.cudnn.is_available():
        try:
            # Note: At the time of writing (torch 2.2.1), torch.backends.cudnn.version() only raises an error the first
            # time it is called. Subsequent calls will return the version number without complaining about a mismatch.
            cudnn_version = torch.backends.cudnn.version()
            logger.info(f"cuDNN version: {cudnn_version}")
        except RuntimeError as e:
            logger.warning(
                "Encountered a cuDNN version issue. This may result in degraded performance. This issue is usually "
                "caused by an incompatible cuDNN version installed in your python environment, or on the host "
                f"system. Full error message:\n{e}"
            )


def invoke_api() -> None:
    def find_port(port: int) -> int:
        """Find a port not in use starting at given port"""
        # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
        # https://github.com/WaylonWalker
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1)
            if s.connect_ex(("localhost", port)) == 0:
                return find_port(port=port + 1)
            else:
                return port

    if app_config.dev_reload:
        try:
            import jurigged
        except ImportError as e:
            logger.error(
                'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.',
                exc_info=e,
            )
        else:
            jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)

    global port
    port = find_port(app_config.port)
    if port != app_config.port:
        logger.warn(f"Port {app_config.port} in use, using port {port}")

    check_cudnn(logger)

    config = uvicorn.Config(
        app=app,
        host=app_config.host,
        port=port,
        loop="asyncio",
        log_level=app_config.log_level_network,
        ssl_certfile=app_config.ssl_certfile,
        ssl_keyfile=app_config.ssl_keyfile,
    )
    server = uvicorn.Server(config)

    # replace uvicorn's loggers with InvokeAI's for consistent appearance
    uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
    uvicorn_logger.handlers.clear()
    for hdlr in logger.handlers:
        uvicorn_logger.addHandler(hdlr)

    loop.run_until_complete(server.serve())


if __name__ == "__main__":
    invoke_api()
@@ -1,5 +1,33 @@

import shutil
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

from invokeai.app.services.config.config_default import get_config

custom_nodes_path = Path(get_config().custom_nodes_path)
custom_nodes_path.mkdir(parents=True, exist_ok=True)

custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")

# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)

# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
    p.chmod(custom_nodes_path.stat().st_mode)

# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
    raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)

# add core nodes to __all__
python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
__all__ = [f.stem for f in python_files]  # type: ignore
@@ -44,6 +44,8 @@ if TYPE_CHECKING:

logger = InvokeAILogger.get_logger()

CUSTOM_NODE_PACK_SUFFIX = "__invokeai-custom-node"


class InvalidVersionError(ValueError):
    pass

@@ -238,11 +240,6 @@ class BaseInvocation(ABC, BaseModel):

        """Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
        return signature(cls.invoke).return_annotation

    @classmethod
    def get_invocation_for_type(cls, invocation_type: str) -> BaseInvocation | None:
        """Gets the invocation class for a given invocation type."""
        return cls.get_invocations_map().get(invocation_type)

    @staticmethod
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
        """Adds various UI-facing attributes to the invocation's OpenAPI schema."""

@@ -449,27 +446,8 @@ def invocation(

        if re.compile(r"^\S+$").match(invocation_type) is None:
            raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"')

        # The node pack is the module name - will be "invokeai" for built-in nodes
        node_pack = cls.__module__.split(".")[0]

        # Handle the case where an existing node is being clobbered by the one we are registering
        if invocation_type in BaseInvocation.get_invocation_types():
            clobbered_invocation = BaseInvocation.get_invocation_for_type(invocation_type)
            # This should always be true - we just checked if the invocation type was in the set
            assert clobbered_invocation is not None

            clobbered_node_pack = clobbered_invocation.UIConfig.node_pack

            if clobbered_node_pack == "invokeai":
                # The node being clobbered is a core node
                raise ValueError(
                    f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a core node with the same type already exists'
                )
            else:
                # The node being clobbered is a custom node
                raise ValueError(
                    f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a node with the same type already exists in node pack "{clobbered_node_pack}"'
                )
            raise ValueError(f'Invocation type "{invocation_type}" already exists')

        validate_fields(cls.model_fields, invocation_type)

@@ -479,7 +457,8 @@

        uiconfig["tags"] = tags
        uiconfig["category"] = category
        uiconfig["classification"] = classification
        uiconfig["node_pack"] = node_pack
        # The node pack is the module name - will be "invokeai" for built-in nodes
        uiconfig["node_pack"] = cls.__module__.split(".")[0]

        if version is not None:
            try:
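For illustration, here is how a hypothetical custom node pack would interact with the clobbering check above - the pack name is derived from the top-level module, and re-registering an existing type raises the `ValueError` shown (a sketch; `my_nodes` and `my_resize` are made-up names):

```python
# Hypothetical file: my_nodes/resize.py inside a custom node pack.
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation


@invocation("my_resize", title="My Resize", tags=["image"], category="image", version="1.0.0")
class MyResizeInvocation(BaseInvocation):
    # node_pack is derived as __module__.split(".")[0] -> "my_nodes".
    # Reusing a type that is already registered raises the ValueError above.
    ...
```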
@@ -10,12 +10,10 @@ from pathlib import Path

from invokeai.backend.util.logging import InvokeAILogger

logger = InvokeAILogger.get_logger()
loaded_packs: list[str] = []
failed_packs: list[str] = []
loaded_count = 0

custom_nodes_dir = Path(__file__).parent

for d in custom_nodes_dir.iterdir():
for d in Path(__file__).parent.iterdir():
    # skip files
    if not d.is_dir():
        continue

@@ -49,16 +47,12 @@ for d in custom_nodes_dir.iterdir():

        sys.modules[spec.name] = module
        spec.loader.exec_module(module)

        loaded_packs.append(module_name)
        loaded_count += 1
    except Exception:
        failed_packs.append(module_name)
        full_error = traceback.format_exc()
        logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
        logger.error(f"Failed to load node pack {module_name}:\n{full_error}")

    del init, module_name

loaded_count = len(loaded_packs)
if loaded_count > 0:
    logger.info(
        f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_dir}: {', '.join(loaded_packs)}"
    )
logger.info(f"Loaded {loaded_count} node packs from {Path(__file__).parent}")
@@ -41,11 +41,16 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):

    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
        """Estimate the working memory required by the invocation in bytes."""
        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
        # element size (precision).
        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
        element_size = next(vae.parameters()).element_size()
        scaling_constant = 2200  # Determined experimentally.
        scaling_constant = 1090  # Determined experimentally.
        working_memory = out_h * out_w * element_size * scaling_constant

        # We add a 20% buffer to the working memory estimate to be safe.
        working_memory = working_memory * 1.2
        return int(working_memory)

    def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
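To make the estimate concrete, here is the arithmetic for a 1024x1024 output decoded with an fp16 VAE, using the updated constant (a worked example, not project code):

```python
# out = 1024x1024 px, fp16 weights -> element_size = 2 bytes
out_h = out_w = 1024
element_size = 2
scaling_constant = 1090
working_memory = out_h * out_w * element_size * scaling_constant  # 2_285_895_680 bytes (~2.3 GB)
working_memory = int(working_memory * 1.2)  # 20% safety buffer -> ~2.7 GB
```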
@@ -60,7 +60,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
        # element size (precision). This estimate is accurate for both SD1 and SDXL.
        element_size = 4 if self.fp32 else 2
        scaling_constant = 2200  # Determined experimentally.
        scaling_constant = 960  # Determined experimentally.

        if use_tiling:
            tile_size = self.tile_size

@@ -84,7 +84,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

        # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
        working_memory += 250 * 2**20

        return int(working_memory)
        # We add 20% to the working memory estimate to be safe.
        working_memory = int(working_memory * 1.2)
        return working_memory

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
@@ -1,40 +0,0 @@

import shutil
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path


def load_custom_nodes(custom_nodes_path: Path):
    """
    Loads all custom nodes from the custom_nodes_path directory.

    This function copies a custom __init__.py file to the custom_nodes_path directory, effectively turning it into a
    python module.

    The custom __init__.py file itself imports all the custom node packs as python modules from the custom_nodes_path
    directory.

    Then, the custom __init__.py file is programmatically imported using importlib. As it executes, it imports all the
    custom node packs as python modules.
    """
    custom_nodes_path.mkdir(parents=True, exist_ok=True)

    custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
    custom_nodes_readme_path = str(custom_nodes_path / "README.md")

    # copy our custom nodes __init__.py to the custom nodes directory
    shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
    shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)

    # set the same permissions as the destination directory, in case our source is read-only,
    # so that the files are user-writable
    for p in custom_nodes_path.glob("**/*"):
        p.chmod(custom_nodes_path.stat().st_mode)

    # Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
    spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
    if spec is None or spec.loader is None:
        raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
    module = module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)

@@ -284,7 +284,6 @@ class CoreMetadataInvocation(BaseInvocation):

    tags=["metadata"],
    category="metadata",
    version="1.0.0",
    classification=Classification.Deprecated,
)
class MetadataFieldExtractorInvocation(BaseInvocation):
    """Extracts the text value from an image's metadata given a key.

File diff suppressed because it is too large.
@@ -265,9 +265,13 @@ class ImageInvocation(BaseInvocation):

    image: ImageField = InputField(description="The image to load")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image_dto = context.images.get_dto(self.image.image_name)
        image = context.images.get_pil(self.image.image_name)

        return ImageOutput.build(image_dto=image_dto)
        return ImageOutput(
            image=ImageField(image_name=self.image.image_name),
            width=image.width,
            height=image.height,
        )


@invocation(
@@ -43,11 +43,16 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
        """Estimate the working memory required by the invocation in bytes."""
        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
        # element size (precision).
        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
        element_size = next(vae.parameters()).element_size()
        scaling_constant = 2200  # Determined experimentally.
        scaling_constant = 1230  # Determined experimentally.
        working_memory = out_h * out_w * element_size * scaling_constant

        # We add a 20% buffer to the working memory estimate to be safe.
        working_memory = working_memory * 1.2
        return int(working_memory)

    @torch.no_grad()
@@ -9,6 +9,6 @@ def validate_weights(weights: Union[float, list[float]]) -> None:


def validate_begin_end_step(begin_step_percent: float, end_step_percent: float) -> None:
    """Validate that begin_step_percent is less than or equal to end_step_percent"""
    if begin_step_percent > end_step_percent:
    """Validate that begin_step_percent is less than end_step_percent"""
    if begin_step_percent >= end_step_percent:
        raise ValueError("Begin step percent must be less than or equal to end step percent")
@@ -1,82 +1,12 @@

import uvicorn

from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.cli.arg_parser import InvokeAIArgs


def get_app():
    """Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
    importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
    """
    from invokeai.app.api_app import app, loop

    return app, loop

"""This is a wrapper around the main app entrypoint, to allow for CLI args to be parsed before running the app."""


def run_app() -> None:
    """The main entrypoint for the app."""
    # Parse the CLI arguments.
    # Before doing _anything_, parse CLI args!
    from invokeai.frontend.cli.arg_parser import InvokeAIArgs

    InvokeAIArgs.parse_args()

    # Load config.
    app_config = get_config()
    from invokeai.app.api_app import invoke_api

    logger = InvokeAILogger.get_logger(config=app_config)

    # Configure the torch CUDA memory allocator.
    # NOTE: It is important that this happens before torch is imported.
    if app_config.pytorch_cuda_alloc_conf:
        configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)

    # Import from startup_utils here to avoid importing torch before configure_torch_cuda_allocator() is called.
    from invokeai.app.util.startup_utils import (
        apply_monkeypatches,
        check_cudnn,
        enable_dev_reload,
        find_open_port,
        register_mime_types,
    )

    # Find an open port, and modify the config accordingly.
    orig_config_port = app_config.port
    app_config.port = find_open_port(app_config.port)
    if orig_config_port != app_config.port:
        logger.warning(f"Port {orig_config_port} is already in use. Using port {app_config.port}.")

    # Miscellaneous startup tasks.
    apply_monkeypatches()
    register_mime_types()
    if app_config.dev_reload:
        enable_dev_reload()
    check_cudnn(logger)

    # Initialize the app and event loop.
    app, loop = get_app()

    # Load custom nodes. This must be done after importing the Graph class, which itself imports all modules from the
    # invocations module. The ordering here is implicit, but important - we want to load custom nodes after all the
    # core nodes have been imported so that we can catch when a custom node clobbers a core node.
    load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path)

    # Start the server.
    config = uvicorn.Config(
        app=app,
        host=app_config.host,
        port=app_config.port,
        loop="asyncio",
        log_level=app_config.log_level_network,
        ssl_certfile=app_config.ssl_certfile,
        ssl_keyfile=app_config.ssl_keyfile,
    )
    server = uvicorn.Server(config)

    # replace uvicorn's loggers with InvokeAI's for consistent appearance
    uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
    uvicorn_logger.handlers.clear()
    for hdlr in logger.handlers:
        uvicorn_logger.addHandler(hdlr)

    loop.run_until_complete(server.serve())
    invoke_api()
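The ordering note in `run_app` above matters because PyTorch reads its allocator settings only once, when the CUDA allocator initializes. A minimal sketch of the underlying idea (not the project's `configure_torch_cuda_allocator` implementation):

```python
import os

# Must run before the first `import torch` anywhere in the process - the
# CUDA caching allocator reads this variable only at initialization.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:cudaMallocAsync"

import torch  # noqa: E402  # deliberately imported after the env var is set
```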
@@ -1,5 +1,5 @@

from abc import ABC, abstractmethod
from typing import Optional
from typing import Literal, Optional

from invokeai.app.services.image_records.image_records_common import ImageCategory

@@ -27,7 +27,7 @@ class BoardImageRecordStorageBase(ABC):

    @abstractmethod
    def get_all_board_image_names_for_board(
        self,
        board_id: str,
        board_id: str | Literal["none"],
        categories: list[ImageCategory] | None,
        is_intermediate: bool | None,
    ) -> list[str]:
@@ -1,5 +1,6 @@

import sqlite3
from typing import Optional, cast
import threading
from typing import Literal, Optional, cast

from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
from invokeai.app.services.image_records.image_records_common import (

@@ -12,9 +13,15 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase


class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
    _conn: sqlite3.Connection
    _cursor: sqlite3.Cursor
    _lock: threading.RLock

    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._lock = db.lock
        self._conn = db.conn
        self._cursor = self._conn.cursor()

    def add_image_to_board(
        self,

@@ -22,8 +29,8 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):

        image_name: str,
    ) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                INSERT INTO board_images (board_id, image_name)
                VALUES (?, ?)

@@ -35,14 +42,16 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):

        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()

    def remove_image_from_board(
        self,
        image_name: str,
    ) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE FROM board_images
                WHERE image_name = ?;

@@ -53,6 +62,8 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):

        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()

    def get_images_for_board(
        self,

@@ -61,108 +72,138 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):

        limit: int = 10,
    ) -> OffsetPaginatedResults[ImageRecord]:
        # TODO: this isn't paginated yet?
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT images.*
            FROM board_images
            INNER JOIN images ON board_images.image_name = images.image_name
            WHERE board_images.board_id = ?
            ORDER BY board_images.updated_at DESC;
            """,
            (board_id,),
        )
        result = cast(list[sqlite3.Row], cursor.fetchall())
        images = [deserialize_image_record(dict(r)) for r in result]
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT images.*
                FROM board_images
                INNER JOIN images ON board_images.image_name = images.image_name
                WHERE board_images.board_id = ?
                ORDER BY board_images.updated_at DESC;
                """,
                (board_id,),
            )
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            images = [deserialize_image_record(dict(r)) for r in result]

        cursor.execute(
            """--sql
            SELECT COUNT(*) FROM images WHERE 1=1;
            """
        )
        count = cast(int, cursor.fetchone()[0])
            self._cursor.execute(
                """--sql
                SELECT COUNT(*) FROM images WHERE 1=1;
                """
            )
            count = cast(int, self._cursor.fetchone()[0])

        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()
        return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)

    def get_all_board_image_names_for_board(
        self,
        board_id: str,
        board_id: str | Literal["none"],
        categories: list[ImageCategory] | None,
        is_intermediate: bool | None,
    ) -> list[str]:
        params: list[str | bool] = []
        try:
            self._lock.acquire()

            # Base query is a join between images and board_images
            stmt = """
            params: list[str | bool] = []

            # Base query is a join between images and board_images
            stmt = """
                SELECT images.image_name
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE 1=1
                AND board_images.board_id = ?
            """
            params.append(board_id)

            # Add the category filter
            if categories is not None:
                # Convert the enum values to unique list of strings
                category_strings = [c.value for c in set(categories)]
                # Create the correct length of placeholders
                placeholders = ",".join("?" * len(category_strings))
                stmt += f"""--sql
            # Add the board_id filter
            if board_id == "none":
                stmt += "AND board_images.board_id IS NULL"
            else:
                stmt += "AND board_images.board_id = ?"
                params.append(board_id)

            # Add the category filter
            if categories is not None:
                # Convert the enum values to unique list of strings
                category_strings = [c.value for c in set(categories)]
                # Create the correct length of placeholders
                placeholders = ",".join("?" * len(category_strings))
                stmt += f"""--sql
                    AND images.image_category IN ( {placeholders} )
                """

                # Unpack the included categories into the query params
                for c in category_strings:
                    params.append(c)
                # Unpack the included categories into the query params
                for c in category_strings:
                    params.append(c)

            # Add the is_intermediate filter
            if is_intermediate is not None:
                stmt += """--sql
            # Add the is_intermediate filter
            if is_intermediate is not None:
                stmt += """--sql
                    AND images.is_intermediate = ?
                """
                params.append(is_intermediate)
                params.append(is_intermediate)

            # Put a ring on it
            stmt += ";"
            # Put a ring on it
            stmt += ";"

            # Execute the query
            cursor = self._conn.cursor()
            cursor.execute(stmt, params)
            # Execute the query
            self._cursor.execute(stmt, params)

            result = cast(list[sqlite3.Row], cursor.fetchall())
            image_names = [r[0] for r in result]
            return image_names
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            image_names = [r[0] for r in result]
            return image_names
        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()
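For reference, the two shapes of query this method now builds differ only in the board filter - roughly the following (placeholders left as `?`):

```sql
-- board_id == "none": images assigned to no board
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1 AND board_images.board_id IS NULL;

-- a concrete board id
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1 AND board_images.board_id = ?;
```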
    def get_board_for_image(
        self,
        image_name: str,
    ) -> Optional[str]:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT board_id
                FROM board_images
                WHERE image_name = ?;
                """,
                (image_name,),
            )
            result = cursor.fetchone()
            if result is None:
                return None
            return cast(str, result[0])
                (image_name,),
            )
            result = self._cursor.fetchone()
            if result is None:
                return None
            return cast(str, result[0])
        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()

    def get_image_count_for_board(self, board_id: str) -> int:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT COUNT(*)
                FROM board_images
                INNER JOIN images ON board_images.image_name = images.image_name
                WHERE images.is_intermediate = FALSE
                AND board_images.board_id = ?;
                """,
                (board_id,),
            )
            count = cast(int, cursor.fetchone()[0])
            return count
                (board_id,),
            )
            count = cast(int, self._cursor.fetchone()[0])
            return count
        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()
@@ -1,5 +1,5 @@

from abc import ABC, abstractmethod
from typing import Optional
from typing import Literal, Optional

from invokeai.app.services.image_records.image_records_common import ImageCategory

@@ -27,7 +27,7 @@ class BoardImagesServiceABC(ABC):

    @abstractmethod
    def get_all_board_image_names_for_board(
        self,
        board_id: str,
        board_id: str | Literal["none"],
        categories: list[ImageCategory] | None,
        is_intermediate: bool | None,
    ) -> list[str]:
@@ -1,4 +1,4 @@
|
||||
from typing import Optional
|
||||
from typing import Literal, Optional
|
||||
|
||||
from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory
|
||||
@@ -26,7 +26,7 @@ class BoardImagesService(BoardImagesServiceABC):
|
||||
|
||||
def get_all_board_image_names_for_board(
|
||||
self,
|
||||
board_id: str,
|
||||
board_id: str | Literal["none"],
|
||||
categories: list[ImageCategory] | None,
|
||||
is_intermediate: bool | None,
|
||||
) -> list[str]:
|
||||
|
||||
@@ -1,4 +1,5 @@
import sqlite3
import threading
from typing import Union, cast

from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
@@ -18,14 +19,20 @@ from invokeai.app.util.misc import uuid_string


class SqliteBoardRecordStorage(BoardRecordStorageBase):
    _conn: sqlite3.Connection
    _cursor: sqlite3.Cursor
    _lock: threading.RLock

    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._lock = db.lock
        self._conn = db.conn
        self._cursor = self._conn.cursor()

    def delete(self, board_id: str) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE FROM boards
                WHERE board_id = ?;
@@ -33,9 +40,14 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
                (board_id,),
            )
            self._conn.commit()
        except sqlite3.Error as e:
            self._conn.rollback()
            raise BoardRecordDeleteException from e
        except Exception as e:
            self._conn.rollback()
            raise BoardRecordDeleteException from e
        finally:
            self._lock.release()

    def save(
        self,
@@ -43,8 +55,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
    ) -> BoardRecord:
        try:
            board_id = uuid_string()
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                INSERT OR IGNORE INTO boards (board_id, board_name)
                VALUES (?, ?);
@@ -55,6 +67,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
        except sqlite3.Error as e:
            self._conn.rollback()
            raise BoardRecordSaveException from e
        finally:
            self._lock.release()
        return self.get(board_id)

    def get(
@@ -62,8 +76,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
        board_id: str,
    ) -> BoardRecord:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT *
                FROM boards
@@ -72,9 +86,12 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
                (board_id,),
            )

            result = cast(Union[sqlite3.Row, None], cursor.fetchone())
            result = cast(Union[sqlite3.Row, None], self._cursor.fetchone())
        except sqlite3.Error as e:
            self._conn.rollback()
            raise BoardRecordNotFoundException from e
        finally:
            self._lock.release()
        if result is None:
            raise BoardRecordNotFoundException
        return BoardRecord(**dict(result))
@@ -85,10 +102,11 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
        changes: BoardChanges,
    ) -> BoardRecord:
        try:
            cursor = self._conn.cursor()
            self._lock.acquire()

            # Change the name of a board
            if changes.board_name is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE boards
                    SET board_name = ?
@@ -99,7 +117,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):

            # Change the cover image of a board
            if changes.cover_image_name is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE boards
                    SET cover_image_name = ?
@@ -110,7 +128,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):

            # Change the archived status of a board
            if changes.archived is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE boards
                    SET archived = ?
@@ -123,6 +141,8 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
        except sqlite3.Error as e:
            self._conn.rollback()
            raise BoardRecordSaveException from e
        finally:
            self._lock.release()
        return self.get(board_id)

    def get_many(
@@ -133,10 +153,11 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
        limit: int = 10,
        include_archived: bool = False,
    ) -> OffsetPaginatedResults[BoardRecord]:
        cursor = self._conn.cursor()
        try:
            self._lock.acquire()

            # Build base query
            base_query = """
                SELECT *
                FROM boards
                {archived_filter}
@@ -144,67 +165,81 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
                LIMIT ? OFFSET ?;
            """

            # Determine archived filter condition
            archived_filter = "" if include_archived else "WHERE archived = 0"

            final_query = base_query.format(
                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
            )

            # Execute query to fetch boards
            cursor.execute(final_query, (limit, offset))
            self._cursor.execute(final_query, (limit, offset))

            result = cast(list[sqlite3.Row], cursor.fetchall())
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            boards = [deserialize_board_record(dict(r)) for r in result]

            # Determine count query
            if include_archived:
                count_query = """
                    SELECT COUNT(*)
                    FROM boards;
                """
            else:
                count_query = """
                    SELECT COUNT(*)
                    FROM boards
                    WHERE archived = 0;
                """

            # Execute count query
            cursor.execute(count_query)
            self._cursor.execute(count_query)

            count = cast(int, cursor.fetchone()[0])
            count = cast(int, self._cursor.fetchone()[0])

            return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)

        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()

    def get_all(
        self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
    ) -> list[BoardRecord]:
        cursor = self._conn.cursor()
        try:
            self._lock.acquire()

            if order_by == BoardRecordOrderBy.Name:
                base_query = """
                    SELECT *
                    FROM boards
                    {archived_filter}
                    ORDER BY LOWER(board_name) {direction}
                """
            else:
                base_query = """
                    SELECT *
                    FROM boards
                    {archived_filter}
                    ORDER BY {order_by} {direction}
                """

            archived_filter = "" if include_archived else "WHERE archived = 0"

            final_query = base_query.format(
                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
            )

            cursor.execute(final_query)
            self._cursor.execute(final_query)

            result = cast(list[sqlite3.Row], cursor.fetchall())
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            boards = [deserialize_board_record(dict(r)) for r in result]

            return boards

        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()
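
The `get_many`/`get_all` hunks above mix two templating mechanisms: `str.format` for the parts SQLite cannot bind (the `ORDER BY` column and direction, the archived filter) and `?` placeholders for actual values. A hedged sketch of why both are needed (the table and column names are illustrative):

    import sqlite3

    def fetch_page(conn: sqlite3.Connection, order_by: str, direction: str, limit: int, offset: int) -> list:
        # Identifiers and keywords cannot be bound with "?", so they are
        # interpolated -- but only from a fixed allow-list, never user input.
        assert order_by in {"created_at", "board_name"}
        assert direction in {"ASC", "DESC"}
        query = f"SELECT * FROM boards ORDER BY {order_by} {direction} LIMIT ? OFFSET ?;"
        # The values, by contrast, are bound and therefore injection-safe.
        return conn.execute(query, (limit, offset)).fetchall()

In the diff the allow-listing is implicit: `order_by.value` and `direction.value` come from enums, so only known-safe strings ever reach `format`.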
@@ -63,11 +63,7 @@ class BulkDownloadService(BulkDownloadBase):
        return [self._invoker.services.images.get_dto(image_name) for image_name in image_names]

    def _board_handler(self, board_id: str) -> list[ImageDTO]:
        image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(
            board_id,
            categories=None,
            is_intermediate=None,
        )
        image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
        return self._image_handler(image_names)

    def generate_item_id(self, board_id: Optional[str]) -> str:
@@ -91,7 +91,6 @@ class InvokeAIAppConfig(BaseSettings):
        ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
        vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
        lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
        pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
        precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
        sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
@@ -170,9 +169,6 @@ class InvokeAIAppConfig(BaseSettings):
    vram: Optional[float] = Field(default=None, ge=0, description="DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.")
    lazy_offload: bool = Field(default=True, description="DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.")

    # PyTorch Memory Allocator
    pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")

    # DEVICE
    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
    precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
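
For context on the `pytorch_cuda_alloc_conf` documentation above: Torch reads the same knob natively from the `PYTORCH_CUDA_ALLOC_CONF` environment variable, so a rough equivalent of the removed config field is to set that variable before Torch initializes CUDA. A hedged illustration (the value shown is the one the docstring suggests; it must be tuned experimentally):

    import os

    # Must be set before torch initializes CUDA for it to take effect.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:cudaMallocAsync"

    import torch  # imported after the variable is set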
@@ -1,4 +1,5 @@
import sqlite3
import threading
from datetime import datetime
from typing import Optional, Union, cast

@@ -21,14 +22,21 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase


class SqliteImageRecordStorage(ImageRecordStorageBase):
    _conn: sqlite3.Connection
    _cursor: sqlite3.Cursor
    _lock: threading.RLock

    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._lock = db.lock
        self._conn = db.conn
        self._cursor = self._conn.cursor()

    def get(self, image_name: str) -> ImageRecord:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()

            self._cursor.execute(
                f"""--sql
                SELECT {IMAGE_DTO_COLS} FROM images
                WHERE image_name = ?;
@@ -36,9 +44,12 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                (image_name,),
            )

            result = cast(Optional[sqlite3.Row], cursor.fetchone())
            result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordNotFoundException from e
        finally:
            self._lock.release()

        if not result:
            raise ImageRecordNotFoundException
@@ -47,8 +58,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):

    def get_metadata(self, image_name: str) -> Optional[MetadataField]:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()

            self._cursor.execute(
                """--sql
                SELECT metadata FROM images
                WHERE image_name = ?;
@@ -56,7 +68,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                (image_name,),
            )

            result = cast(Optional[sqlite3.Row], cursor.fetchone())
            result = cast(Optional[sqlite3.Row], self._cursor.fetchone())

            if not result:
                raise ImageRecordNotFoundException
@@ -65,7 +77,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
            metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
            return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordNotFoundException from e
        finally:
            self._lock.release()

    def update(
        self,
@@ -73,10 +88,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        changes: ImageRecordChanges,
    ) -> None:
        try:
            cursor = self._conn.cursor()
            self._lock.acquire()
            # Change the category of the image
            if changes.image_category is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE images
                    SET image_category = ?
@@ -87,7 +102,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):

            # Change the session associated with the image
            if changes.session_id is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE images
                    SET session_id = ?
@@ -98,7 +113,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):

            # Change the image's `is_intermediate`` flag
            if changes.is_intermediate is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE images
                    SET is_intermediate = ?
@@ -109,7 +124,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):

            # Change the image's `starred`` state
            if changes.starred is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE images
                    SET starred = ?
@@ -122,6 +137,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordSaveException from e
        finally:
            self._lock.release()

    def get_many(
        self,
@@ -135,104 +152,110 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        board_id: Optional[str] = None,
        search_term: Optional[str] = None,
    ) -> OffsetPaginatedResults[ImageRecord]:
        cursor = self._conn.cursor()
        try:
            self._lock.acquire()

            # Manually build two queries - one for the count, one for the records
            count_query = """--sql
                SELECT COUNT(*)
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE 1=1
            """

            images_query = f"""--sql
                SELECT {IMAGE_DTO_COLS}
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE 1=1
            """

            query_conditions = ""
            query_params: list[Union[int, str, bool]] = []

            if image_origin is not None:
                query_conditions += """--sql
                    AND images.image_origin = ?
                """
                query_params.append(image_origin.value)

            if categories is not None:
                # Convert the enum values to unique list of strings
                category_strings = [c.value for c in set(categories)]
                # Create the correct length of placeholders
                placeholders = ",".join("?" * len(category_strings))

                query_conditions += f"""--sql
                    AND images.image_category IN ( {placeholders} )
                """

                # Unpack the included categories into the query params
                for c in category_strings:
                    query_params.append(c)

            if is_intermediate is not None:
                query_conditions += """--sql
                    AND images.is_intermediate = ?
                """
                query_params.append(is_intermediate)

            # board_id of "none" is reserved for images without a board
            if board_id == "none":
                query_conditions += """--sql
                    AND board_images.board_id IS NULL
                """
            elif board_id is not None:
                query_conditions += """--sql
                    AND board_images.board_id = ?
                """
                query_params.append(board_id)

            # Search term condition
            if search_term:
                query_conditions += """--sql
                    AND images.metadata LIKE ?
                """
                query_params.append(f"%{search_term.lower()}%")

            if starred_first:
                query_pagination = f"""--sql
                    ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
                """
            else:
                query_pagination = f"""--sql
                    ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
                """

            # Final images query with pagination
            images_query += query_conditions + query_pagination + ";"
            # Add all the parameters
            images_params = query_params.copy()
            # Add the pagination parameters
            images_params.extend([limit, offset])

            # Build the list of images, deserializing each row
            cursor.execute(images_query, images_params)
            self._cursor.execute(images_query, images_params)
            result = cast(list[sqlite3.Row], cursor.fetchall())
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            images = [deserialize_image_record(dict(r)) for r in result]

            # Set up and execute the count query, without pagination
            count_query += query_conditions + ";"
            count_params = query_params.copy()
            cursor.execute(count_query, count_params)
            self._cursor.execute(count_query, count_params)
            count = cast(int, cursor.fetchone()[0])
            count = cast(int, self._cursor.fetchone()[0])
        except sqlite3.Error as e:
            self._conn.rollback()
            raise e
        finally:
            self._lock.release()

        return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)

    def delete(self, image_name: str) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE FROM images
                WHERE image_name = ?;
@@ -243,48 +266,58 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordDeleteException from e
        finally:
            self._lock.release()

    def delete_many(self, image_names: list[str]) -> None:
        try:
            cursor = self._conn.cursor()

            placeholders = ",".join("?" for _ in image_names)

            self._lock.acquire()

            # Construct the SQLite query with the placeholders
            query = f"DELETE FROM images WHERE image_name IN ({placeholders})"

            # Execute the query with the list of IDs as parameters
            cursor.execute(query, image_names)
            self._cursor.execute(query, image_names)

            self._conn.commit()
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordDeleteException from e
        finally:
            self._lock.release()

    def get_intermediates_count(self) -> int:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT COUNT(*) FROM images
            WHERE is_intermediate = TRUE;
            """
        )
        count = cast(int, cursor.fetchone()[0])
        self._conn.commit()
        return count
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT COUNT(*) FROM images
                WHERE is_intermediate = TRUE;
                """
            )
            count = cast(int, self._cursor.fetchone()[0])
            self._conn.commit()
            return count
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordDeleteException from e
        finally:
            self._lock.release()

    def delete_intermediates(self) -> list[str]:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT image_name FROM images
                WHERE is_intermediate = TRUE;
                """
            )
            result = cast(list[sqlite3.Row], cursor.fetchall())
            result = cast(list[sqlite3.Row], self._cursor.fetchall())
            image_names = [r[0] for r in result]
            cursor.execute(
            self._cursor.execute(
                """--sql
                DELETE FROM images
                WHERE is_intermediate = TRUE;
@@ -295,6 +328,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordDeleteException from e
        finally:
            self._lock.release()

    def save(
        self,
@@ -311,8 +346,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        metadata: Optional[str] = None,
    ) -> datetime:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                INSERT OR IGNORE INTO images (
                    image_name,
@@ -345,7 +380,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
            )
            self._conn.commit()

            cursor.execute(
            self._cursor.execute(
                """--sql
                SELECT created_at
                FROM images
@@ -354,30 +389,34 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                (image_name,),
            )

            created_at = datetime.fromisoformat(cursor.fetchone()[0])
            created_at = datetime.fromisoformat(self._cursor.fetchone()[0])

            return created_at
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordSaveException from e
        finally:
            self._lock.release()

    def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT images.*
            FROM images
            JOIN board_images ON images.image_name = board_images.image_name
            WHERE board_images.board_id = ?
            AND images.is_intermediate = FALSE
            ORDER BY images.starred DESC, images.created_at DESC
            LIMIT 1;
            """,
            (board_id,),
        )

        result = cast(Optional[sqlite3.Row], cursor.fetchone())
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT images.*
                FROM images
                JOIN board_images ON images.image_name = board_images.image_name
                WHERE board_images.board_id = ?
                AND images.is_intermediate = FALSE
                ORDER BY images.starred DESC, images.created_at DESC
                LIMIT 1;
                """,
                (board_id,),
            )

            result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
        finally:
            self._lock.release()
        if result is None:
            return None
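
`delete_many` above builds its `IN` clause by joining one `?` per value, which keeps every image name bound rather than interpolated. A minimal sketch of the same technique (the connection handling and table name are illustrative):

    import sqlite3

    def delete_rows(conn: sqlite3.Connection, names: list[str]) -> None:
        if not names:
            return  # "IN ()" is a syntax error in SQLite, so bail out early
        placeholders = ",".join("?" for _ in names)
        conn.execute(f"DELETE FROM images WHERE image_name IN ({placeholders})", names)
        conn.commit()

One caveat worth knowing: SQLite caps the number of host parameters per statement (999 in older builds), so very large lists may need chunking.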
@@ -265,11 +265,7 @@ class ImageService(ImageServiceABC):

    def delete_images_on_board(self, board_id: str):
        try:
            image_names = self.__invoker.services.board_image_records.get_all_board_image_names_for_board(
                board_id,
                categories=None,
                is_intermediate=None,
            )
            image_names = self.__invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
            for image_name in image_names:
                self.__invoker.services.image_files.delete(image_name)
            self.__invoker.services.image_records.delete_many(image_names)
@@ -282,7 +278,7 @@ class ImageService(ImageServiceABC):
            self.__invoker.services.logger.error("Failed to delete image files")
            raise
        except Exception as e:
            self.__invoker.services.logger.error(f"Problem deleting image records and files: {str(e)}")
            self.__invoker.services.logger.error("Problem deleting image records and files")
            raise e

    def delete_intermediates(self) -> int:
@@ -78,6 +78,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        """
        super().__init__()
        self._db = db
        self._cursor = db.conn.cursor()
        self._logger = logger

    @property
@@ -95,38 +96,38 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Can raise DuplicateModelException and InvalidModelConfigException exceptions.
        """
        try:
            cursor = self._db.conn.cursor()
            cursor.execute(
                """--sql
                INSERT INTO models (
                    id,
                    config
                )
                VALUES (?,?);
                """,
                (
                    config.key,
                    config.model_dump_json(),
                ),
            )
            self._db.conn.commit()
        with self._db.lock:
            try:
                self._cursor.execute(
                    """--sql
                    INSERT INTO models (
                        id,
                        config
                    )
                    VALUES (?,?);
                    """,
                    (
                        config.key,
                        config.model_dump_json(),
                    ),
                )
                self._db.conn.commit()

            except sqlite3.IntegrityError as e:
                self._db.conn.rollback()
                if "UNIQUE constraint failed" in str(e):
                    if "models.path" in str(e):
                        msg = f"A model with path '{config.path}' is already installed"
                    elif "models.name" in str(e):
                        msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
                    else:
                        msg = f"A model with key '{config.key}' is already installed"
                    raise DuplicateModelException(msg) from e
                else:
                    raise e
            except sqlite3.Error as e:
                self._db.conn.rollback()
                raise e

        return self.get_model(config.key)

@@ -138,21 +139,21 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Can raise an UnknownModelException
        """
        try:
            cursor = self._db.conn.cursor()
            cursor.execute(
                """--sql
                DELETE FROM models
                WHERE id=?;
                """,
                (key,),
            )
            if cursor.rowcount == 0:
                raise UnknownModelException("model not found")
            self._db.conn.commit()
        except sqlite3.Error as e:
            self._db.conn.rollback()
            raise e
        with self._db.lock:
            try:
                self._cursor.execute(
                    """--sql
                    DELETE FROM models
                    WHERE id=?;
                    """,
                    (key,),
                )
                if self._cursor.rowcount == 0:
                    raise UnknownModelException("model not found")
                self._db.conn.commit()
            except sqlite3.Error as e:
                self._db.conn.rollback()
                raise e

    def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig:
        record = self.get_model(key)
@@ -163,23 +164,23 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        json_serialized = record.model_dump_json()

        try:
            cursor = self._db.conn.cursor()
            cursor.execute(
                """--sql
                UPDATE models
                SET
                    config=?
                WHERE id=?;
                """,
                (json_serialized, key),
            )
            if cursor.rowcount == 0:
                raise UnknownModelException("model not found")
            self._db.conn.commit()
        except sqlite3.Error as e:
            self._db.conn.rollback()
            raise e
        with self._db.lock:
            try:
                self._cursor.execute(
                    """--sql
                    UPDATE models
                    SET
                        config=?
                    WHERE id=?;
                    """,
                    (json_serialized, key),
                )
                if self._cursor.rowcount == 0:
                    raise UnknownModelException("model not found")
                self._db.conn.commit()
            except sqlite3.Error as e:
                self._db.conn.rollback()
                raise e

        return self.get_model(key)

@@ -191,33 +192,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Exceptions: UnknownModelException
        """
        cursor = self._db.conn.cursor()
        cursor.execute(
            """--sql
            SELECT config, strftime('%s',updated_at) FROM models
            WHERE id=?;
            """,
            (key,),
        )
        rows = cursor.fetchone()
        if not rows:
            raise UnknownModelException("model not found")
        model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
        with self._db.lock:
            self._cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE id=?;
                """,
                (key,),
            )
            rows = self._cursor.fetchone()
            if not rows:
                raise UnknownModelException("model not found")
            model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
        return model

    def get_model_by_hash(self, hash: str) -> AnyModelConfig:
        cursor = self._db.conn.cursor()
        cursor.execute(
            """--sql
            SELECT config, strftime('%s',updated_at) FROM models
            WHERE hash=?;
            """,
            (hash,),
        )
        rows = cursor.fetchone()
        if not rows:
            raise UnknownModelException("model not found")
        model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
        with self._db.lock:
            self._cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE hash=?;
                """,
                (hash,),
            )
            rows = self._cursor.fetchone()
            if not rows:
                raise UnknownModelException("model not found")
            model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
        return model

    def exists(self, key: str) -> bool:
@@ -226,15 +227,16 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        :param key: Unique key for the model to be deleted
        """
        cursor = self._db.conn.cursor()
        cursor.execute(
            """--sql
            select count(*) FROM models
            WHERE id=?;
            """,
            (key,),
        )
        count = cursor.fetchone()[0]
        count = 0
        with self._db.lock:
            self._cursor.execute(
                """--sql
                select count(*) FROM models
                WHERE id=?;
                """,
                (key,),
            )
            count = self._cursor.fetchone()[0]
        return count > 0

    def search_by_attr(
@@ -282,18 +284,17 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            where_clause.append("format=?")
            bindings.append(model_format)
        where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""

        cursor = self._db.conn.cursor()
        cursor.execute(
            f"""--sql
            SELECT config, strftime('%s',updated_at)
            FROM models
            {where}
            ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
            """,
            tuple(bindings),
        )
        result = cursor.fetchall()
        with self._db.lock:
            self._cursor.execute(
                f"""--sql
                SELECT config, strftime('%s',updated_at)
                FROM models
                {where}
                ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
                """,
                tuple(bindings),
            )
            result = self._cursor.fetchall()

        # Parse the model configs.
        results: list[AnyModelConfig] = []
@@ -312,28 +313,34 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

    def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
        """Return models with the indicated path."""
        cursor = self._db.conn.cursor()
        cursor.execute(
            """--sql
            SELECT config, strftime('%s',updated_at) FROM models
            WHERE path=?;
            """,
            (str(path),),
        )
        results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
        results = []
        with self._db.lock:
            self._cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE path=?;
                """,
                (str(path),),
            )
            results = [
                ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall()
            ]
        return results

    def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
        """Return models with the indicated hash."""
        cursor = self._db.conn.cursor()
        cursor.execute(
            """--sql
            SELECT config, strftime('%s',updated_at) FROM models
            WHERE hash=?;
            """,
            (hash,),
        )
        results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
        results = []
        with self._db.lock:
            self._cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE hash=?;
                """,
                (hash,),
            )
            results = [
                ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall()
            ]
        return results

    def list_models(
@@ -349,32 +356,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            ModelRecordOrderBy.Format: "format",
        }

        cursor = self._db.conn.cursor()

        # query1: get the total number of model configs
        cursor.execute(
            """--sql
            select count(*) from models;
            """,
            (),
        )
        total = int(cursor.fetchone()[0])

        # Lock so that the database isn't updated while we're doing the two queries.
        with self._db.lock:
            # query1: get the total number of model configs
            self._cursor.execute(
                """--sql
                select count(*) from models;
                """,
                (),
            )
            total = int(self._cursor.fetchone()[0])

        # query2: fetch key fields
        cursor.execute(
            f"""--sql
            SELECT config
            FROM models
            ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
            LIMIT ?
            OFFSET ?;
            """,
            (
                per_page,
                page * per_page,
            ),
        )
        rows = cursor.fetchall()
        items = [ModelSummary.model_validate(dict(x)) for x in rows]
        return PaginatedResults(page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items)

            # query2: fetch key fields
            self._cursor.execute(
                f"""--sql
                SELECT config
                FROM models
                ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
                LIMIT ?
                OFFSET ?;
                """,
                (
                    per_page,
                    page * per_page,
                ),
            )
            rows = self._cursor.fetchall()
            items = [ModelSummary.model_validate(dict(x)) for x in rows]
            return PaginatedResults(
                page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items
            )
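
Unlike the board and image stores, which pair `lock.acquire()` with a release in `finally`, the model-record hunks above guard the shared cursor with `with self._db.lock:`, which releases the lock on any exit path, including exceptions. A small sketch of that variant (the names are illustrative, not InvokeAI's API):

    import sqlite3
    import threading

    class ModelStoreSketch:
        def __init__(self, conn: sqlite3.Connection, lock: threading.Lock) -> None:
            self._conn = conn
            self._lock = lock
            self._cursor = conn.cursor()

        def exists(self, key: str) -> bool:
            with self._lock:  # released automatically, even if execute() raises
                self._cursor.execute("SELECT COUNT(*) FROM models WHERE id = ?;", (key,))
                count = self._cursor.fetchone()[0]
            return count > 0

The two forms are equivalent in effect; the context manager simply removes the risk of a forgotten `release()`.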
@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
from typing import Any, Coroutine, Optional
from typing import Optional

from invokeai.app.services.session_queue.session_queue_common import (
    QUEUE_ITEM_STATUS,
@@ -33,7 +33,7 @@ class SessionQueueBase(ABC):
        pass

    @abstractmethod
    def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> Coroutine[Any, Any, EnqueueBatchResult]:
    def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        """Enqueues all permutations of a batch for execution."""
        pass


@@ -568,7 +568,7 @@ ValueToInsertTuple: TypeAlias = tuple[
    str | None,  # workflow (optional, as stringified JSON)
    str | None,  # origin (optional)
    str | None,  # destination (optional)
    int | None,  # retried_from_item_id (optional, this is always None for new items)
    str | None,  # retried_from_item_id (optional, this is always None for new items)
]
"""A type alias for the tuple of values to insert into the session queue table."""
@@ -1,6 +1,6 @@
import asyncio
import json
import sqlite3
import threading
from typing import Optional, Union, cast

from pydantic_core import to_jsonable_python
@@ -37,6 +37,9 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase

class SqliteSessionQueue(SessionQueueBase):
    __invoker: Invoker
    __conn: sqlite3.Connection
    __cursor: sqlite3.Cursor
    __lock: threading.RLock

    def start(self, invoker: Invoker) -> None:
        self.__invoker = invoker
@@ -52,7 +55,9 @@ class SqliteSessionQueue(SessionQueueBase):

    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._conn = db.conn
        self.__lock = db.lock
        self.__conn = db.conn
        self.__cursor = self.__conn.cursor()

    def _set_in_progress_to_canceled(self) -> None:
        """
@@ -60,8 +65,8 @@ class SqliteSessionQueue(SessionQueueBase):
        This is necessary because the invoker may have been killed while processing a queue item.
        """
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                UPDATE session_queue
                SET status = 'canceled'
@@ -69,13 +74,14 @@ class SqliteSessionQueue(SessionQueueBase):
                """
            )
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()

    def _get_current_queue_size(self, queue_id: str) -> int:
        """Gets the current number of pending queue items"""
        cursor = self._conn.cursor()
        cursor.execute(
        self.__cursor.execute(
            """--sql
            SELECT count(*)
            FROM session_queue
@@ -85,12 +91,11 @@ class SqliteSessionQueue(SessionQueueBase):
            """,
            (queue_id,),
        )
        return cast(int, cursor.fetchone()[0])
        return cast(int, self.__cursor.fetchone()[0])

    def _get_highest_priority(self, queue_id: str) -> int:
        """Gets the highest priority value in the queue"""
        cursor = self._conn.cursor()
        cursor.execute(
        self.__cursor.execute(
            """--sql
            SELECT MAX(priority)
            FROM session_queue
@@ -100,14 +105,12 @@ class SqliteSessionQueue(SessionQueueBase):
            """,
            (queue_id,),
        )
        return cast(Union[int, None], cursor.fetchone()[0]) or 0
        return cast(Union[int, None], self.__cursor.fetchone()[0]) or 0

    async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)

    def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
    def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        try:
            cursor = self._conn.cursor()
            self.__lock.acquire()

            # TODO: how does this work in a multi-user scenario?
            current_queue_size = self._get_current_queue_size(queue_id)
            max_queue_size = self.__invoker.services.configuration.max_queue_size
@@ -129,17 +132,19 @@ class SqliteSessionQueue(SessionQueueBase):
            if requested_count > enqueued_count:
                values_to_insert = values_to_insert[:max_new_queue_items]

            cursor.executemany(
            self.__cursor.executemany(
                """--sql
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                values_to_insert,
            )
            self._conn.commit()
            self.__conn.commit()
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        enqueue_result = EnqueueBatchResult(
            queue_id=queue_id,
            requested=requested_count,
@@ -151,19 +156,25 @@ class SqliteSessionQueue(SessionQueueBase):
        return enqueue_result

    def dequeue(self) -> Optional[SessionQueueItem]:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT *
            FROM session_queue
            WHERE status = 'pending'
            ORDER BY
                priority DESC,
                item_id ASC
            LIMIT 1
            """
        )
        result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        try:
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE status = 'pending'
                ORDER BY
                    priority DESC,
                    item_id ASC
                LIMIT 1
                """
            )
            result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        if result is None:
            return None
        queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
@@ -171,40 +182,52 @@ class SqliteSessionQueue(SessionQueueBase):
        return queue_item

    def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT *
            FROM session_queue
            WHERE
                queue_id = ?
                AND status = 'pending'
            ORDER BY
                priority DESC,
                created_at ASC
            LIMIT 1
            """,
            (queue_id,),
        )
        result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        try:
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'pending'
                ORDER BY
                    priority DESC,
                    created_at ASC
                LIMIT 1
                """,
                (queue_id,),
            )
            result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        if result is None:
            return None
        return SessionQueueItem.queue_item_from_dict(dict(result))

    def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT *
            FROM session_queue
            WHERE
                queue_id = ?
                AND status = 'in_progress'
            LIMIT 1
            """,
            (queue_id,),
        )
        result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        try:
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'in_progress'
                LIMIT 1
                """,
                (queue_id,),
            )
            result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        if result is None:
            return None
        return SessionQueueItem.queue_item_from_dict(dict(result))
@@ -218,8 +241,8 @@ class SqliteSessionQueue(SessionQueueBase):
        error_traceback: Optional[str] = None,
    ) -> SessionQueueItem:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                UPDATE session_queue
                SET status = ?, error_type = ?, error_message = ?, error_traceback = ?
@@ -227,10 +250,12 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (status, error_type, error_message, error_traceback, item_id),
            )
            self._conn.commit()
            self.__conn.commit()
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        queue_item = self.get_queue_item(item_id)
        batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
        queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
@@ -238,36 +263,48 @@ class SqliteSessionQueue(SessionQueueBase):
        return queue_item

    def is_empty(self, queue_id: str) -> IsEmptyResult:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT count(*)
            FROM session_queue
            WHERE queue_id = ?
            """,
            (queue_id,),
        )
        is_empty = cast(int, cursor.fetchone()[0]) == 0
        try:
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT count(*)
                FROM session_queue
                WHERE queue_id = ?
                """,
                (queue_id,),
            )
            is_empty = cast(int, self.__cursor.fetchone()[0]) == 0
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return IsEmptyResult(is_empty=is_empty)

    def is_full(self, queue_id: str) -> IsFullResult:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT count(*)
            FROM session_queue
            WHERE queue_id = ?
            """,
            (queue_id,),
        )
        max_queue_size = self.__invoker.services.configuration.max_queue_size
        is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
        try:
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT count(*)
                FROM session_queue
                WHERE queue_id = ?
                """,
                (queue_id,),
            )
            max_queue_size = self.__invoker.services.configuration.max_queue_size
            is_full = cast(int, self.__cursor.fetchone()[0]) >= max_queue_size
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return IsFullResult(is_full=is_full)

    def clear(self, queue_id: str) -> ClearResult:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -275,8 +312,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                """--sql
                DELETE
                FROM session_queue
@@ -284,16 +321,17 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            self._conn.commit()
            self.__conn.commit()
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        self.__invoker.services.events.emit_queue_cleared(queue_id)
        return ClearResult(deleted=count)

    def prune(self, queue_id: str) -> PruneResult:
        try:
            cursor = self._conn.cursor()
            where = """--sql
                WHERE
                    queue_id = ?
@@ -303,7 +341,8 @@ class SqliteSessionQueue(SessionQueueBase):
                        OR status = 'canceled'
                    )
                """
            cursor.execute(
            self.__lock.acquire()
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -311,8 +350,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                DELETE
                FROM session_queue
@@ -320,10 +359,12 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            self._conn.commit()
            self.__conn.commit()
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return PruneResult(deleted=count)

    def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
@@ -352,8 +393,8 @@ class SqliteSessionQueue(SessionQueueBase):

    def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
        try:
            cursor = self._conn.cursor()
            current_queue_item = self.get_current(queue_id)
            self.__lock.acquire()
            placeholders = ", ".join(["?" for _ in batch_ids])
            where = f"""--sql
                WHERE
@@ -364,7 +405,7 @@ class SqliteSessionQueue(SessionQueueBase):
                    AND status != 'failed'
                """
            params = [queue_id] + batch_ids
            cursor.execute(
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -372,8 +413,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
@@ -381,18 +422,20 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )
            self._conn.commit()
            self.__conn.commit()
            if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
                self._set_queue_item_status(current_queue_item.item_id, "canceled")
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelByBatchIDsResult(canceled=count)

    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
        try:
            cursor = self._conn.cursor()
            current_queue_item = self.get_current(queue_id)
            self.__lock.acquire()
            where = """--sql
                WHERE
                    queue_id == ?
@@ -402,7 +445,7 @@ class SqliteSessionQueue(SessionQueueBase):
                    AND status != 'failed'
                """
            params = (queue_id, destination)
            cursor.execute(
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -410,8 +453,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                params,
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
@@ -419,18 +462,20 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                params,
            )
            self._conn.commit()
            self.__conn.commit()
            if current_queue_item is not None and current_queue_item.destination == destination:
                self._set_queue_item_status(current_queue_item.item_id, "canceled")
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelByDestinationResult(canceled=count)

    def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
        try:
            cursor = self._conn.cursor()
            current_queue_item = self.get_current(queue_id)
            self.__lock.acquire()
            where = """--sql
                WHERE
                    queue_id is ?
@@ -439,7 +484,7 @@ class SqliteSessionQueue(SessionQueueBase):
                    AND status != 'failed'
                """
            params = [queue_id]
            cursor.execute(
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -447,8 +492,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
@@ -456,7 +501,7 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )
            self._conn.commit()
            self.__conn.commit()
            if current_queue_item is not None and current_queue_item.queue_id == queue_id:
                batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
                queue_status = self.get_queue_status(queue_id=queue_id)
@@ -464,19 +509,21 @@ class SqliteSessionQueue(SessionQueueBase):
                    current_queue_item, batch_status, queue_status
                )
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelByQueueIDResult(canceled=count)

    def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
        try:
            cursor = self._conn.cursor()
            where = """--sql
                WHERE
                    queue_id == ?
                    AND status == 'pending'
                """
            cursor.execute(
            self.__lock.acquire()
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
@@ -484,8 +531,8 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
@@ -493,35 +540,43 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
            self._conn.commit()
            self.__conn.commit()
        except Exception:
            self._conn.rollback()
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelAllExceptCurrentResult(canceled=count)

    def get_queue_item(self, item_id: int) -> SessionQueueItem:
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT * FROM session_queue
            WHERE
                item_id = ?
            """,
            (item_id,),
        )
        result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        try:
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
SELECT * FROM session_queue
|
||||
WHERE
|
||||
item_id = ?
|
||||
""",
|
||||
(item_id,),
|
||||
)
|
||||
result = cast(Union[sqlite3.Row, None], self.__cursor.fetchone())
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
if result is None:
|
||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
||||
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
||||
# during execution.
|
||||
session_json = session.model_dump_json(warnings=False, exclude_none=True)
|
||||
cursor.execute(
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
UPDATE session_queue
|
||||
SET session = ?
|
||||
@@ -529,10 +584,12 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
""",
|
||||
(session_json, item_id),
|
||||
)
|
||||
self._conn.commit()
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
return self.get_queue_item(item_id)
|
||||
|
||||
def list_queue_items(
|
||||
@@ -543,71 +600,83 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
cursor: Optional[int] = None,
|
||||
status: Optional[QUEUE_ITEM_STATUS] = None,
|
||||
) -> CursorPaginatedResults[SessionQueueItemDTO]:
|
||||
cursor_ = self._conn.cursor()
|
||||
item_id = cursor
|
||||
query = """--sql
|
||||
SELECT item_id,
|
||||
status,
|
||||
priority,
|
||||
field_values,
|
||||
error_type,
|
||||
error_message,
|
||||
error_traceback,
|
||||
created_at,
|
||||
updated_at,
|
||||
completed_at,
|
||||
started_at,
|
||||
session_id,
|
||||
batch_id,
|
||||
queue_id,
|
||||
origin,
|
||||
destination
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
"""
|
||||
params: list[Union[str, int]] = [queue_id]
|
||||
|
||||
if status is not None:
|
||||
query += """--sql
|
||||
AND status = ?
|
||||
"""
|
||||
params.append(status)
|
||||
|
||||
if item_id is not None:
|
||||
query += """--sql
|
||||
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
||||
"""
|
||||
params.extend([priority, priority, item_id])
|
||||
|
||||
query += """--sql
|
||||
ORDER BY
|
||||
priority DESC,
|
||||
item_id ASC
|
||||
LIMIT ?
|
||||
try:
|
||||
item_id = cursor
|
||||
self.__lock.acquire()
|
||||
query = """--sql
|
||||
SELECT item_id,
|
||||
status,
|
||||
priority,
|
||||
field_values,
|
||||
error_type,
|
||||
error_message,
|
||||
error_traceback,
|
||||
created_at,
|
||||
updated_at,
|
||||
completed_at,
|
||||
started_at,
|
||||
session_id,
|
||||
batch_id,
|
||||
queue_id,
|
||||
origin,
|
||||
destination
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
"""
|
||||
params.append(limit + 1)
|
||||
cursor_.execute(query, params)
|
||||
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
||||
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
|
||||
has_more = False
|
||||
if len(items) > limit:
|
||||
# remove the extra item
|
||||
items.pop()
|
||||
has_more = True
|
||||
params: list[Union[str, int]] = [queue_id]
|
||||
|
||||
if status is not None:
|
||||
query += """--sql
|
||||
AND status = ?
|
||||
"""
|
||||
params.append(status)
|
||||
|
||||
if item_id is not None:
|
||||
query += """--sql
|
||||
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
||||
"""
|
||||
params.extend([priority, priority, item_id])
|
||||
|
||||
query += """--sql
|
||||
ORDER BY
|
||||
priority DESC,
|
||||
item_id ASC
|
||||
LIMIT ?
|
||||
"""
|
||||
params.append(limit + 1)
|
||||
self.__cursor.execute(query, params)
|
||||
results = cast(list[sqlite3.Row], self.__cursor.fetchall())
|
||||
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
|
||||
has_more = False
|
||||
if len(items) > limit:
|
||||
# remove the extra item
|
||||
items.pop()
|
||||
has_more = True
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
return CursorPaginatedResults(items=items, limit=limit, has_more=has_more)
|
||||
|
||||
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id,),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id,),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], self.__cursor.fetchall())
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
current_item = self.get_current(queue_id=queue_id)
|
||||
total = sum(row[1] for row in counts_result)
|
||||
@@ -626,23 +695,29 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
|
||||
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*), origin, destination
|
||||
FROM session_queue
|
||||
WHERE
|
||||
queue_id = ?
|
||||
AND batch_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, batch_id),
|
||||
)
|
||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
total = sum(row[1] for row in result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in result}
|
||||
origin = result[0]["origin"] if result else None
|
||||
destination = result[0]["destination"] if result else None
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*), origin, destination
|
||||
FROM session_queue
|
||||
WHERE
|
||||
queue_id = ?
|
||||
AND batch_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, batch_id),
|
||||
)
|
||||
result = cast(list[sqlite3.Row], self.__cursor.fetchall())
|
||||
total = sum(row[1] for row in result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in result}
|
||||
origin = result[0]["origin"] if result else None
|
||||
destination = result[0]["destination"] if result else None
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
return BatchStatus(
|
||||
batch_id=batch_id,
|
||||
@@ -658,18 +733,24 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
|
||||
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
AND destination = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, destination),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
AND destination = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, destination),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], self.__cursor.fetchall())
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
total = sum(row[1] for row in counts_result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
|
||||
@@ -688,7 +769,8 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
|
||||
"""Retries the given queue items"""
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
self.__lock.acquire()
|
||||
|
||||
values_to_insert: list[tuple] = []
|
||||
retried_item_ids: list[int] = []
|
||||
|
||||
@@ -731,7 +813,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
|
||||
# TODO(psyche): Handle max queue size?
|
||||
|
||||
cursor.executemany(
|
||||
self.__cursor.executemany(
|
||||
"""--sql
|
||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
@@ -739,10 +821,12 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
values_to_insert,
|
||||
)
|
||||
|
||||
self._conn.commit()
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
retry_result = RetryItemsResult(
|
||||
queue_id=queue_id,
|
||||
retried_item_ids=retried_item_ids,
|
||||
|
||||
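Note: each method in this file swaps between two concurrency styles: per-call `self._conn.cursor()` with commit/rollback on a shared connection, versus a shared `self.__cursor` serialized by `self.__lock` and released in a `finally` block. A minimal, self-contained sketch of the lock-serialized side (the class and method names here are illustrative, not the repository's exact code):

import sqlite3
import threading


class LockedStore:
    """Sketch: serialize all access to one connection/cursor with an RLock."""

    def __init__(self, conn: sqlite3.Connection, lock) -> None:
        # `lock` is a threading.RLock shared by every storage class on this connection
        self._conn = conn
        self._lock = lock
        self._cursor = conn.cursor()

    def count_pending(self, queue_id: str) -> int:
        try:
            self._lock.acquire()
            self._cursor.execute(
                "SELECT COUNT(*) FROM session_queue WHERE queue_id = ? AND status = 'pending'",
                (queue_id,),
            )
            return self._cursor.fetchone()[0]
        except Exception:
            self._conn.rollback()  # discard any partial transaction before re-raising
            raise
        finally:
            self._lock.release()  # release even when an exception escapes

store = LockedStore(sqlite3.connect(":memory:", check_same_thread=False), threading.RLock())
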
@@ -9,7 +9,6 @@ from torch import Tensor

from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import MetadataField, WithBoard, WithMetadata
from invokeai.app.services.board_records.board_records_common import BoardRecordOrderBy
from invokeai.app.services.boards.boards_common import BoardDTO
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
@@ -17,7 +16,6 @@ from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
    AnyModel,
@@ -104,9 +102,7 @@ class BoardsInterface(InvocationContextInterface):
        Returns:
            A list of all boards.
        """
        return self._services.boards.get_all(
            order_by=BoardRecordOrderBy.CreatedAt, direction=SQLiteDirection.Descending
        )
        return self._services.boards.get_all()

    def add_image_to_board(self, board_id: str, image_name: str) -> None:
        """Adds an image to a board.
@@ -126,11 +122,7 @@ class BoardsInterface(InvocationContextInterface):
        Returns:
            A list of all image names for the board.
        """
        return self._services.board_images.get_all_board_image_names_for_board(
            board_id,
            categories=None,
            is_intermediate=None,
        )
        return self._services.board_images.get_all_board_image_names_for_board(board_id)


class LoggerInterface(InvocationContextInterface):
@@ -291,7 +283,7 @@ class ImagesInterface(InvocationContextInterface):
        Returns:
            The local path of the image or thumbnail.
        """
        return Path(self._services.images.get_path(image_name, thumbnail))
        return self._services.images.get_path(image_name, thumbnail)


class TensorsInterface(InvocationContextInterface):

@@ -1,4 +1,5 @@
import sqlite3
import threading
from logging import Logger
from pathlib import Path

@@ -37,20 +38,14 @@ class SqliteDatabase:
        self.logger.info(f"Initializing database at {self.db_path}")

        self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
        self.lock = threading.RLock()
        self.conn.row_factory = sqlite3.Row

        if self.verbose:
            self.conn.set_trace_callback(self.logger.debug)

        # Enable foreign key constraints
        self.conn.execute("PRAGMA foreign_keys = ON;")

        # Enable Write-Ahead Logging (WAL) mode for better concurrency
        self.conn.execute("PRAGMA journal_mode = WAL;")

        # Set a busy timeout to prevent database lockups during writes
        self.conn.execute("PRAGMA busy_timeout = 5000;")  # 5 seconds

    def clean(self) -> None:
        """
        Cleans the database by running the VACUUM command, reporting on the freed space.
@@ -58,14 +53,15 @@ class SqliteDatabase:
        # No need to clean in-memory database
        if not self.db_path:
            return
        try:
            initial_db_size = Path(self.db_path).stat().st_size
            self.conn.execute("VACUUM;")
            self.conn.commit()
            final_db_size = Path(self.db_path).stat().st_size
            freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
            if freed_space_in_mb > 0:
                self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
        except Exception as e:
            self.logger.error(f"Error cleaning database: {e}")
            raise
        with self.lock:
            try:
                initial_db_size = Path(self.db_path).stat().st_size
                self.conn.execute("VACUUM;")
                self.conn.commit()
                final_db_size = Path(self.db_path).stat().st_size
                freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
                if freed_space_in_mb > 0:
                    self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
            except Exception as e:
                self.logger.error(f"Error cleaning database: {e}")
                raise

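For reference, the connection setup that one side of this hunk removes can be reproduced standalone; a minimal sketch (the in-memory path is an assumption for illustration — WAL only takes effect on file-backed databases):

import sqlite3

conn = sqlite3.connect(":memory:", check_same_thread=False)
conn.row_factory = sqlite3.Row  # rows become mapping-like: row["column_name"]
conn.execute("PRAGMA foreign_keys = ON;")  # enforce foreign key constraints
conn.execute("PRAGMA journal_mode = WAL;")  # write-ahead log: readers don't block the writer
conn.execute("PRAGMA busy_timeout = 5000;")  # wait up to 5s instead of failing on a locked db
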
@@ -43,45 +43,46 @@ class SqliteMigrator:

    def run_migrations(self) -> bool:
        """Migrates the database to the latest version."""
        # This throws if there is a problem.
        self._migration_set.validate_migration_chain()
        cursor = self._db.conn.cursor()
        self._create_migrations_table(cursor=cursor)
        with self._db.lock:
            # This throws if there is a problem.
            self._migration_set.validate_migration_chain()
            cursor = self._db.conn.cursor()
            self._create_migrations_table(cursor=cursor)

        if self._migration_set.count == 0:
            self._logger.debug("No migrations registered")
            return False
            if self._migration_set.count == 0:
                self._logger.debug("No migrations registered")
                return False

        if self._get_current_version(cursor=cursor) == self._migration_set.latest_version:
            self._logger.debug("Database is up to date, no migrations to run")
            return False
            if self._get_current_version(cursor=cursor) == self._migration_set.latest_version:
                self._logger.debug("Database is up to date, no migrations to run")
                return False

        self._logger.info("Database update needed")
            self._logger.info("Database update needed")

        # Make a backup of the db if it needs to be updated and is a file db
        if self._db.db_path is not None:
            timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
            self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
            self._logger.info(f"Backing up database to {str(self._backup_path)}")
            # Use SQLite to do the backup
            with closing(sqlite3.connect(self._backup_path)) as backup_conn:
                self._db.conn.backup(backup_conn)
        else:
            self._logger.info("Using in-memory database, no backup needed")
            # Make a backup of the db if it needs to be updated and is a file db
            if self._db.db_path is not None:
                timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
                self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
                self._logger.info(f"Backing up database to {str(self._backup_path)}")
                # Use SQLite to do the backup
                with closing(sqlite3.connect(self._backup_path)) as backup_conn:
                    self._db.conn.backup(backup_conn)
            else:
                self._logger.info("Using in-memory database, no backup needed")

        next_migration = self._migration_set.get(from_version=self._get_current_version(cursor))
        while next_migration is not None:
            self._run_migration(next_migration)
            next_migration = self._migration_set.get(self._get_current_version(cursor))
        self._logger.info("Database updated successfully")
        return True
            next_migration = self._migration_set.get(from_version=self._get_current_version(cursor))
            while next_migration is not None:
                self._run_migration(next_migration)
                next_migration = self._migration_set.get(self._get_current_version(cursor))
            self._logger.info("Database updated successfully")
            return True

    def _run_migration(self, migration: Migration) -> None:
        """Runs a single migration."""
        try:
            # Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
            # exception is raised.
            with self._db.conn as conn:
            with self._db.lock, self._db.conn as conn:
                cursor = conn.cursor()
                if self._get_current_version(cursor) != migration.from_version:
                    raise MigrationError(
@@ -107,26 +108,27 @@ class SqliteMigrator:

    def _create_migrations_table(self, cursor: sqlite3.Cursor) -> None:
        """Creates the migrations table for the database, if one does not already exist."""
        try:
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='migrations';")
            if cursor.fetchone() is not None:
                return
            cursor.execute(
                """--sql
                CREATE TABLE migrations (
                    version INTEGER PRIMARY KEY,
                    migrated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
                );
                """
            )
            cursor.execute("INSERT INTO migrations (version) VALUES (0);")
            cursor.connection.commit()
            self._logger.debug("Created migrations table")
        except sqlite3.Error as e:
            msg = f"Problem creating migrations table: {e}"
            self._logger.error(msg)
            cursor.connection.rollback()
            raise MigrationError(msg) from e
        with self._db.lock:
            try:
                cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='migrations';")
                if cursor.fetchone() is not None:
                    return
                cursor.execute(
                    """--sql
                    CREATE TABLE migrations (
                        version INTEGER PRIMARY KEY,
                        migrated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
                    );
                    """
                )
                cursor.execute("INSERT INTO migrations (version) VALUES (0);")
                cursor.connection.commit()
                self._logger.debug("Created migrations table")
            except sqlite3.Error as e:
                msg = f"Problem creating migrations table: {e}"
                self._logger.error(msg)
                cursor.connection.rollback()
                raise MigrationError(msg) from e

    @classmethod
    def _get_current_version(cls, cursor: sqlite3.Cursor) -> int:

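The migrator reads the schema version from the `migrations` table created above. `_get_current_version` is truncated by this hunk; a plausible sketch inferred from that table's shape (an assumption, not the repository's exact code):

import sqlite3


def get_current_version(cursor: sqlite3.Cursor) -> int:
    # The table always holds at least the seed row (version 0) inserted above,
    # so MAX(version) is the highest migration that has completed.
    cursor.execute("SELECT MAX(version) FROM migrations;")
    row = cursor.fetchone()
    return row[0] if row is not None and row[0] is not None else 0
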
@@ -17,7 +17,9 @@ from invokeai.app.util.misc import uuid_string
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._lock = db.lock
        self._conn = db.conn
        self._cursor = self._conn.cursor()

    def start(self, invoker: Invoker) -> None:
        self._invoker = invoker
@@ -25,25 +27,31 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):

    def get(self, style_preset_id: str) -> StylePresetRecordDTO:
        """Gets a style preset by ID."""
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            SELECT *
            FROM style_presets
            WHERE id = ?;
            """,
            (style_preset_id,),
        )
        row = cursor.fetchone()
        if row is None:
            raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
        return StylePresetRecordDTO.from_dict(dict(row))
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                SELECT *
                FROM style_presets
                WHERE id = ?;
                """,
                (style_preset_id,),
            )
            row = self._cursor.fetchone()
            if row is None:
                raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
            return StylePresetRecordDTO.from_dict(dict(row))
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

    def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
        style_preset_id = uuid_string()
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                INSERT OR IGNORE INTO style_presets (
                    id,
@@ -64,16 +72,18 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return self.get(style_preset_id)

    def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
        style_preset_ids = []
        try:
            cursor = self._conn.cursor()
            self._lock.acquire()
            for style_preset in style_presets:
                style_preset_id = uuid_string()
                style_preset_ids.append(style_preset_id)
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    INSERT OR IGNORE INTO style_presets (
                        id,
@@ -94,15 +104,17 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

        return None

    def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
        try:
            cursor = self._conn.cursor()
            self._lock.acquire()
            # Change the name of a style preset
            if changes.name is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE style_presets
                    SET name = ?
@@ -113,7 +125,7 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):

            # Change the preset data for a style preset
            if changes.preset_data is not None:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    UPDATE style_presets
                    SET preset_data = ?
@@ -126,12 +138,14 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return self.get(style_preset_id)

    def delete(self, style_preset_id: str) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE from style_presets
                WHERE id = ?;
@@ -142,38 +156,46 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return None

    def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
        main_query = """
            SELECT
                *
            FROM style_presets
            """
        try:
            self._lock.acquire()
            main_query = """
                SELECT
                    *
                FROM style_presets
                """

        if type is not None:
            main_query += "WHERE type = ? "
            if type is not None:
                main_query += "WHERE type = ? "

        main_query += "ORDER BY LOWER(name) ASC"
            main_query += "ORDER BY LOWER(name) ASC"

        cursor = self._conn.cursor()
        if type is not None:
            cursor.execute(main_query, (type,))
        else:
            cursor.execute(main_query)
            if type is not None:
                self._cursor.execute(main_query, (type,))
            else:
                self._cursor.execute(main_query)

        rows = cursor.fetchall()
        style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
            rows = self._cursor.fetchall()
            style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]

        return style_presets
            return style_presets
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

    def _sync_default_style_presets(self) -> None:
        """Syncs default style presets to the database. Internal use only."""

        # First delete all existing default style presets
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE FROM style_presets
                WHERE type = "default";
@@ -183,8 +205,10 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        # Next, parse and create the default style presets
        with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
        with self._lock, open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
            presets = json.load(file)
            for preset in presets:
                style_preset = StylePresetWithoutId.model_validate(preset)

@@ -23,7 +23,9 @@ from invokeai.app.util.misc import uuid_string
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._lock = db.lock
        self._conn = db.conn
        self._cursor = self._conn.cursor()

    def start(self, invoker: Invoker) -> None:
        self._invoker = invoker
@@ -31,36 +33,42 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):

    def get(self, workflow_id: str) -> WorkflowRecordDTO:
        """Gets a workflow by ID. Updates the opened_at column."""
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            UPDATE workflow_library
            SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
            WHERE workflow_id = ?;
            """,
            (workflow_id,),
        )
        self._conn.commit()
        cursor.execute(
            """--sql
            SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
            FROM workflow_library
            WHERE workflow_id = ?;
            """,
            (workflow_id,),
        )
        row = cursor.fetchone()
        if row is None:
            raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
        return WorkflowRecordDTO.from_dict(dict(row))
        try:
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                UPDATE workflow_library
                SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
                WHERE workflow_id = ?;
                """,
                (workflow_id,),
            )
            self._conn.commit()
            self._cursor.execute(
                """--sql
                SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
                FROM workflow_library
                WHERE workflow_id = ?;
                """,
                (workflow_id,),
            )
            row = self._cursor.fetchone()
            if row is None:
                raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
            return WorkflowRecordDTO.from_dict(dict(row))
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

    def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
        try:
            # Only user workflows may be created by this method
            assert workflow.meta.category is WorkflowCategory.User
            workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                INSERT OR IGNORE INTO workflow_library (
                    workflow_id,
@@ -74,12 +82,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return self.get(workflow_with_id.id)

    def update(self, workflow: Workflow) -> WorkflowRecordDTO:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                UPDATE workflow_library
                SET workflow = ?
@@ -91,12 +101,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return self.get(workflow.id)

    def delete(self, workflow_id: str) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
            self._lock.acquire()
            self._cursor.execute(
                """--sql
                DELETE from workflow_library
                WHERE workflow_id = ? AND category = 'user';
@@ -107,6 +119,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()
        return None

    def get_many(
@@ -118,60 +132,66 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        per_page: Optional[int] = None,
        query: Optional[str] = None,
    ) -> PaginatedResults[WorkflowRecordListItemDTO]:
        # sanitize!
        assert order_by in WorkflowRecordOrderBy
        assert direction in SQLiteDirection
        assert category in WorkflowCategory
        count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
        main_query = """
            SELECT
                workflow_id,
                category,
                name,
                description,
                created_at,
                updated_at,
                opened_at
            FROM workflow_library
            WHERE category = ?
            """
        main_params: list[int | str] = [category.value]
        count_params: list[int | str] = [category.value]
        try:
            self._lock.acquire()
            # sanitize!
            assert order_by in WorkflowRecordOrderBy
            assert direction in SQLiteDirection
            assert category in WorkflowCategory
            count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
            main_query = """
                SELECT
                    workflow_id,
                    category,
                    name,
                    description,
                    created_at,
                    updated_at,
                    opened_at
                FROM workflow_library
                WHERE category = ?
                """
            main_params: list[int | str] = [category.value]
            count_params: list[int | str] = [category.value]

        stripped_query = query.strip() if query else None
        if stripped_query:
            wildcard_query = "%" + stripped_query + "%"
            main_query += " AND name LIKE ? OR description LIKE ? "
            count_query += " AND name LIKE ? OR description LIKE ?;"
            main_params.extend([wildcard_query, wildcard_query])
            count_params.extend([wildcard_query, wildcard_query])
            stripped_query = query.strip() if query else None
            if stripped_query:
                wildcard_query = "%" + stripped_query + "%"
                main_query += " AND name LIKE ? OR description LIKE ? "
                count_query += " AND name LIKE ? OR description LIKE ?;"
                main_params.extend([wildcard_query, wildcard_query])
                count_params.extend([wildcard_query, wildcard_query])

        main_query += f" ORDER BY {order_by.value} {direction.value}"
            main_query += f" ORDER BY {order_by.value} {direction.value}"

        if per_page:
            main_query += " LIMIT ? OFFSET ?"
            main_params.extend([per_page, page * per_page])
            if per_page:
                main_query += " LIMIT ? OFFSET ?"
                main_params.extend([per_page, page * per_page])

        cursor = self._conn.cursor()
        cursor.execute(main_query, main_params)
        rows = cursor.fetchall()
        workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
            self._cursor.execute(main_query, main_params)
            rows = self._cursor.fetchall()
            workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]

        cursor.execute(count_query, count_params)
        total = cursor.fetchone()[0]
            self._cursor.execute(count_query, count_params)
            total = self._cursor.fetchone()[0]

        if per_page:
            pages = total // per_page + (total % per_page > 0)
        else:
            pages = 1  # If no pagination, there is only one page
            if per_page:
                pages = total // per_page + (total % per_page > 0)
            else:
                pages = 1  # If no pagination, there is only one page

        return PaginatedResults(
            items=workflows,
            page=page,
            per_page=per_page if per_page else total,
            pages=pages,
            total=total,
        )
            return PaginatedResults(
                items=workflows,
                page=page,
                per_page=per_page if per_page else total,
                pages=pages,
                total=total,
            )
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

    def _sync_default_workflows(self) -> None:
        """Syncs default workflows to the database. Internal use only."""
@@ -187,6 +207,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        """

        try:
            self._lock.acquire()
            workflows: list[Workflow] = []
            workflows_dir = Path(__file__).parent / Path("default_workflows")
            workflow_paths = workflows_dir.glob("*.json")
@@ -197,15 +218,14 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                workflows.append(workflow)
            # Only default workflows may be managed by this method
            assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
            cursor = self._conn.cursor()
            cursor.execute(
            self._cursor.execute(
                """--sql
                DELETE FROM workflow_library
                WHERE category = 'default';
                """
            )
            for w in workflows:
                cursor.execute(
                self._cursor.execute(
                    """--sql
                    INSERT OR REPLACE INTO workflow_library (
                        workflow_id,
@@ -219,3 +239,5 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        except Exception:
            self._conn.rollback()
            raise
        finally:
            self._lock.release()

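The acquire/release pairs added throughout these storage classes could equivalently use the lock's context-manager protocol, as `SqliteDatabase.clean` and `SqliteMigrator` above already do. A sketch of that alternative (illustrative only, not how this branch writes it):

def get_row(conn, lock, cursor, workflow_id: str):
    with lock:  # an RLock releases automatically on exit, even on error
        try:
            cursor.execute(
                "SELECT * FROM workflow_library WHERE workflow_id = ?;",
                (workflow_id,),
            )
            return cursor.fetchone()
        except Exception:
            conn.rollback()  # still roll back before propagating the error
            raise
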
@@ -1,64 +0,0 @@
import logging
import mimetypes
import socket

import torch


def find_open_port(port: int) -> int:
    """Find a port not in use starting at given port"""
    # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
    # https://github.com/WaylonWalker
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1)
        if s.connect_ex(("localhost", port)) == 0:
            return find_open_port(port=port + 1)
        else:
            return port


def check_cudnn(logger: logging.Logger) -> None:
    """Check for cuDNN issues that could be causing degraded performance."""
    if torch.backends.cudnn.is_available():
        try:
            # Note: At the time of writing (torch 2.2.1), torch.backends.cudnn.version() only raises an error the first
            # time it is called. Subsequent calls will return the version number without complaining about a mismatch.
            cudnn_version = torch.backends.cudnn.version()
            logger.info(f"cuDNN version: {cudnn_version}")
        except RuntimeError as e:
            logger.warning(
                "Encountered a cuDNN version issue. This may result in degraded performance. This issue is usually "
                "caused by an incompatible cuDNN version installed in your python environment, or on the host "
                f"system. Full error message:\n{e}"
            )


def enable_dev_reload() -> None:
    """Enable hot reloading on python file changes during development."""
    from invokeai.backend.util.logging import InvokeAILogger

    try:
        import jurigged
    except ImportError as e:
        raise RuntimeError(
            'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.'
        ) from e
    else:
        jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)


def apply_monkeypatches() -> None:
    """Apply monkeypatches to fix issues with third-party libraries."""

    import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)

    if torch.backends.mps.is_available():
        import invokeai.backend.util.mps_fixes  # noqa: F401 (monkeypatching on import)


def register_mime_types() -> None:
    """Register additional mime types for windows."""
    # Fix for windows mimetypes registry entries being borked.
    # see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
    mimetypes.add_type("application/javascript", ".js")
    mimetypes.add_type("text/css", ".css")
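Typical use of the `find_open_port` helper from the removed module above; assuming nothing is listening on the requested port, the call returns it unchanged, otherwise it probes upward:

port = find_open_port(9090)  # returns 9090, or the next free port above it
print(f"Serving on port {port}")
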
@@ -1,52 +0,0 @@
import logging
import os
import sys


def configure_torch_cuda_allocator(pytorch_cuda_alloc_conf: str, logger: logging.Logger):
    """Configure the PyTorch CUDA memory allocator. See
    https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf for supported
    configurations.
    """

    if "torch" in sys.modules:
        raise RuntimeError("configure_torch_cuda_allocator() must be called before importing torch.")

    # Log a warning if the PYTORCH_CUDA_ALLOC_CONF environment variable is already set.
    prev_cuda_alloc_conf = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", None)
    if prev_cuda_alloc_conf is not None:
        if prev_cuda_alloc_conf == pytorch_cuda_alloc_conf:
            logger.info(
                f"PYTORCH_CUDA_ALLOC_CONF is already set to '{pytorch_cuda_alloc_conf}'. Skipping configuration."
            )
            return
        else:
            logger.warning(
                f"Attempted to configure the PyTorch CUDA memory allocator with '{pytorch_cuda_alloc_conf}', but PYTORCH_CUDA_ALLOC_CONF is already set to "
                f"'{prev_cuda_alloc_conf}'. Skipping configuration."
            )
            return

    # Configure the PyTorch CUDA memory allocator.
    # NOTE: It is important that this happens before torch is imported.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = pytorch_cuda_alloc_conf

    import torch

    # Relevant docs: https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf
    if not torch.cuda.is_available():
        raise RuntimeError(
            "Attempted to configure the PyTorch CUDA memory allocator, but no CUDA devices are available."
        )

    # Verify that the torch allocator was properly configured.
    allocator_backend = torch.cuda.get_allocator_backend()
    expected_backend = "cudaMallocAsync" if "cudaMallocAsync" in pytorch_cuda_alloc_conf else "native"
    if allocator_backend != expected_backend:
        raise RuntimeError(
            f"Failed to configure the PyTorch CUDA memory allocator. Expected backend: '{expected_backend}', but got "
            f"'{allocator_backend}'. Verify that 1) the pytorch_cuda_alloc_conf is set correctly, and 2) that torch is "
            "not imported before calling configure_torch_cuda_allocator()."
        )

    logger.info(f"PyTorch CUDA memory allocator: {torch.cuda.get_allocator_backend()}")
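Typical use of the removed `configure_torch_cuda_allocator`, assuming a CUDA machine and that torch has not yet been imported; "backend:cudaMallocAsync" is one of the documented PYTORCH_CUDA_ALLOC_CONF values:

import logging

logger = logging.getLogger("invokeai")
configure_torch_cuda_allocator("backend:cudaMallocAsync", logger)
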
@@ -75,8 +75,6 @@
    "idb-keyval": "^6.2.1",
    "jsondiffpatch": "^0.6.0",
    "konva": "^9.3.15",
    "linkify-react": "^4.2.0",
    "linkifyjs": "^4.2.0",
    "lodash-es": "^4.17.21",
    "lru-cache": "^11.0.1",
    "mtwist": "^1.0.2",

20
invokeai/frontend/web/pnpm-lock.yaml
generated
@@ -74,12 +74,6 @@ dependencies:
  konva:
    specifier: ^9.3.15
    version: 9.3.15
  linkify-react:
    specifier: ^4.2.0
    version: 4.2.0(linkifyjs@4.2.0)(react@18.3.1)
  linkifyjs:
    specifier: ^4.2.0
    version: 4.2.0
  lodash-es:
    specifier: ^4.17.21
    version: 4.17.21
@@ -6720,20 +6714,6 @@ packages:
    resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
    dev: false

  /linkify-react@4.2.0(linkifyjs@4.2.0)(react@18.3.1):
    resolution: {integrity: sha512-dIcDGo+n4FP2FPIHDcqB7cUE+omkcEgQJpc7sNNP4+XZ9FUhFAkKjGnHMzsZM+B4yF93sK166z9K5cKTe/JpzA==}
    peerDependencies:
      linkifyjs: ^4.0.0
      react: '>= 15.0.0'
    dependencies:
      linkifyjs: 4.2.0
      react: 18.3.1
    dev: false

  /linkifyjs@4.2.0:
    resolution: {integrity: sha512-pCj3PrQyATaoTYKHrgWRF3SJwsm61udVh+vuls/Rl6SptiDhgE7ziUIudAedRY9QEfynmM7/RmLEfPUyw1HPCw==}
    dev: false

  /liqe@3.8.0:
    resolution: {integrity: sha512-cZ1rDx4XzxONBTskSPBp7/KwJ9qbUdF8EPnY4VjKXwHF1Krz9lgnlMTh1G7kd+KtPYvUte1mhuZeQSnk7KiSBg==}
    engines: {node: '>=12.0'}

@@ -107,13 +107,7 @@
    "min": "Min",
    "max": "Max",
    "resetToDefaults": "Auf Standard zurücksetzen",
    "seed": "Seed",
    "row": "Reihe",
    "column": "Spalte",
    "end": "Ende",
    "layout": "Layout",
    "board": "Ordner",
    "combinatorial": "Kombinatorisch"
    "seed": "Seed"
  },
  "gallery": {
    "galleryImageSize": "Bildgröße",
@@ -622,9 +616,7 @@
    "hfTokenUnableToVerify": "HF-Token kann nicht überprüft werden",
    "hfTokenUnableToVerifyErrorMessage": "HuggingFace-Token kann nicht überprüft werden. Dies ist wahrscheinlich auf einen Netzwerkfehler zurückzuführen. Bitte versuchen Sie es später erneut.",
    "hfTokenSaved": "HF-Token gespeichert",
    "hfTokenRequired": "Sie versuchen, ein Modell herunterzuladen, für das ein gültiges HuggingFace-Token erforderlich ist.",
    "urlUnauthorizedErrorMessage2": "Hier erfahren wie.",
    "urlForbidden": "Sie haben keinen Zugriff auf dieses Modell"
    "hfTokenRequired": "Sie versuchen, ein Modell herunterzuladen, für das ein gültiges HuggingFace-Token erforderlich ist."
  },
  "parameters": {
    "images": "Bilder",
@@ -691,8 +683,7 @@
    "iterations": "Iterationen",
    "guidance": "Führung",
    "coherenceMode": "Modus",
    "recallMetadata": "Metadaten abrufen",
    "gaussianBlur": "Gaußsche Unschärfe"
    "recallMetadata": "Metadaten abrufen"
  },
  "settings": {
    "displayInProgress": "Zwischenbilder anzeigen",
@@ -892,8 +883,7 @@
    "canvas": "Leinwand",
    "prompts_one": "Prompt",
    "prompts_other": "Prompts",
    "batchSize": "Stapelgröße",
    "confirm": "Bestätigen"
    "batchSize": "Stapelgröße"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",
@@ -1308,13 +1298,7 @@
    "noBatchGroup": "keine Gruppe",
    "generatorNoValues": "leer",
    "generatorLoading": "wird geladen",
    "generatorLoadFromFile": "Aus Datei laden",
    "showEdgeLabels": "Kantenbeschriftungen anzeigen",
    "downloadWorkflowError": "Fehler beim Herunterladen des Arbeitsablaufs",
    "nodeName": "Knotenname",
    "description": "Beschreibung",
    "loadWorkflowDesc": "Arbeitsablauf laden?",
    "loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen."
    "generatorLoadFromFile": "Aus Datei laden"
  },
  "hrf": {
    "enableHrf": "Korrektur für hohe Auflösungen",

@@ -921,7 +921,6 @@
    "currentImage": "Current Image",
    "currentImageDescription": "Displays the current image in the Node Editor",
    "downloadWorkflow": "Download Workflow JSON",
    "downloadWorkflowError": "Error downloading workflow",
    "edge": "Edge",
    "edit": "Edit",
    "editMode": "Edit in Workflow Editor",
@@ -1726,10 +1725,6 @@
    "layout": "Layout",
    "row": "Row",
    "column": "Column",
    "container": "Container",
    "heading": "Heading",
    "text": "Text",
    "divider": "Divider",
    "nodeField": "Node Field",
    "zoomToNode": "Zoom to Node",
    "nodeFieldTooltip": "To add a node field, click the small plus sign button on the field in the Workflow Editor, or drag the field by its name into the form.",
@@ -1911,7 +1906,7 @@
    "resetGenerationSettings": "Reset Generation Settings",
    "replaceCurrent": "Replace Current",
    "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
    "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
    "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
    "warnings": {
      "problemsFound": "Problems found",
      "unsupportedModel": "layer not supported for selected base model",
@@ -2306,8 +2301,12 @@
  "whatsNew": {
    "whatsNewInInvoke": "What's New in Invoke",
    "items": [
      "Memory Management: New setting for users with Nvidia GPUs to reduce VRAM usage.",
      "Performance: Continued improvements to overall application performance and responsiveness."
      "Improved VRAM setting defaults",
      "On-demand model cache clearing",
      "Expanded FLUX LoRA compatibility",
      "Canvas Adjust Image filter",
      "Cancel all but current queue item",
      "Copy from and paste to Canvas"
    ],
    "readReleaseNotes": "Read Release Notes",
    "watchRecentReleaseVideos": "Watch Recent Release Videos",

@@ -98,22 +98,7 @@
    "close": "Fermer",
    "clipboard": "Presse-papier",
    "loadingModel": "Chargement du modèle",
    "generating": "En Génération",
    "warnings": "Alertes",
    "layout": "Disposition",
    "row": "Ligne",
    "column": "Colonne",
    "start": "Commencer",
    "board": "Planche",
    "count": "Quantité",
    "step": "Étape",
    "end": "Fin",
    "min": "Min",
    "max": "Max",
    "values": "Valeurs",
    "resetToDefaults": "Réinitialiser par défaut",
    "seed": "Graine",
    "combinatorial": "Combinatoire"
    "generating": "En Génération"
  },
  "gallery": {
    "galleryImageSize": "Taille de l'image",
@@ -180,9 +165,7 @@
    "imagesSettings": "Paramètres des images de la galerie",
    "assetsTab": "Fichiers que vous avez importés pour vos projets.",
    "imagesTab": "Images que vous avez créées et enregistrées dans Invoke.",
    "boardsSettings": "Paramètres des planches",
    "assets": "Ressources",
    "images": "Images"
    "boardsSettings": "Paramètres des planches"
  },
  "modelManager": {
    "modelManager": "Gestionnaire de modèle",
@@ -306,7 +289,7 @@
    "usingDefaultSettings": "Utilisation des paramètres par défaut du modèle",
    "defaultSettingsOutOfSync": "Certain paramètres ne correspondent pas aux valeurs par défaut du modèle :",
    "restoreDefaultSettings": "Cliquez pour utiliser les paramètres par défaut du modèle.",
    "hfForbiddenErrorMessage": "Nous vous recommandons de visiter la page du modèle. Le propriétaire peut exiger l'acceptation des conditions pour pouvoir télécharger.",
    "hfForbiddenErrorMessage": "Nous vous recommandons de visiter la page du modèle sur HuggingFace.com. Le propriétaire peut exiger l'acceptation des conditions pour pouvoir télécharger.",
    "hfTokenRequired": "Vous essayez de télécharger un modèle qui nécessite un token HuggingFace valide.",
    "clipLEmbed": "CLIP-L Embed",
    "hfTokenSaved": "Token HF enregistré",
@@ -320,10 +303,7 @@
    "hfForbidden": "Vous n'avez pas accès à ce modèle HF.",
    "hfTokenInvalidErrorMessage2": "Mettre à jour dans le ",
    "controlLora": "Controle LoRA",
    "urlUnauthorizedErrorMessage2": "Découvrir comment ici.",
    "urlUnauthorizedErrorMessage": "Vous devrez peut-être configurer un jeton API pour accéder à ce modèle.",
    "urlForbidden": "Vous n'avez pas accès à ce modèle",
    "urlForbiddenErrorMessage": "Vous devrez peut-être demander l'autorisation du site qui distribue le modèle."
    "urlUnauthorizedErrorMessage2": "Découvrir comment ici."
  },
  "parameters": {
    "images": "Images",
@@ -365,31 +345,19 @@
    "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
    "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
    "noFLUXVAEModelSelected": "Aucun modèle VAE sélectionné pour la génération FLUX",
    "canvasIsTransforming": "La Toile est occupée (en transformation)",
    "canvasIsRasterizing": "La Toile est occupée (en rastérisation)",
    "canvasIsTransforming": "La Toile se transforme",
    "canvasIsRasterizing": "La Toile se rastérise",
    "noCLIPEmbedModelSelected": "Aucun modèle CLIP Embed sélectionné pour la génération FLUX",
    "canvasIsFiltering": "La Toile est occupée (en filtration)",
    "canvasIsFiltering": "La Toile est en train de filtrer",
    "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box est {{width}}",
    "noT5EncoderModelSelected": "Aucun modèle T5 Encoder sélectionné pour la génération FLUX",
    "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box mise à l'échelle est {{width}}",
    "canvasIsCompositing": "La Toile est occupée (en composition)",
    "collectionTooFewItems": "trop peu d'éléments, minimum {{minItems}}",
    "collectionTooManyItems": "trop d'éléments, maximum {{maxItems}}",
    "canvasIsCompositing": "La toile est en train de composer",
    "collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}} : trop peu d'éléments, minimum {{minItems}}",
    "collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}} : trop d'éléments, maximum {{maxItems}}",
    "canvasIsSelectingObject": "La toile est occupée (sélection d'objet)",
    "emptyBatches": "lots vides",
    "batchNodeNotConnected": "Noeud de lots non connecté : {{label}}",
    "fluxModelMultipleControlLoRAs": "Vous ne pouvez utiliser qu'un seul Control LoRA à la fois",
    "collectionNumberLTMin": "{{value}} < {{minimum}} (incl. min)",
    "collectionNumberGTMax": "{{value}} > {{maximum}} (incl. max)",
    "collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (max exc)",
    "batchNodeEmptyCollection": "Certains nœuds de lot ont des collections vides",
    "batchNodeCollectionSizeMismatch": "Non-concordance de taille de collection sur le lot {{batchGroupId}}",
    "collectionStringTooLong": "trop long, max {{maxLength}}",
    "collectionNumberNotMultipleOf": "{{value}} n'est pas un multiple de {{multipleOf}}",
    "collectionEmpty": "collection vide",
    "collectionStringTooShort": "trop court, min {{minLength}}",
    "collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (min exc)",
    "batchNodeCollectionSizeMismatchNoGroupId": "Taille de collection de groupe par lot non conforme"
    "batchNodeNotConnected": "Noeud de lots non connecté : {{label}}"
  },
  "negativePromptPlaceholder": "Prompt Négatif",
  "positivePromptPlaceholder": "Prompt Positif",
@@ -533,13 +501,7 @@
    "uploadFailedInvalidUploadDesc_withCount_one": "Doit être au maximum une image PNG ou JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_many": "Doit être au maximum {{count}} images PNG ou JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Doit être au maximum {{count}} images PNG ou JPEG.",
    "addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)",
    "pasteSuccess": "Collé à {{destination}}",
    "pasteFailed": "Échec du collage",
    "outOfMemoryErrorDescLocal": "Suivez notre <LinkComponent>guide Low VRAM</LinkComponent> pour réduire les OOMs.",
    "unableToCopy": "Incapable de Copier",
    "unableToCopyDesc": "Votre navigateur ne prend pas en charge l'accès au presse-papiers. Les utilisateurs de Firefox peuvent peut-être résoudre ce problème en suivant ",
    "unableToCopyDesc_theseSteps": "ces étapes"
    "addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)"
  },
  "accessibility": {
    "uploadImage": "Importer une image",
@@ -697,14 +659,7 @@
    "iterations_many": "Itérations",
    "iterations_other": "Itérations",
    "back": "fin",
    "batchSize": "Taille de lot",
    "retryFailed": "Problème de nouvelle tentative de l'élément",
    "retrySucceeded": "Élément Retenté",
    "retryItem": "Réessayer l'élement",
    "cancelAllExceptCurrentQueueItemAlertDialog": "Annuler tous les éléments de la file d'attente, sauf celui en cours, arrêtera les éléments en attente mais permettra à celui en cours de se terminer.",
    "cancelAllExceptCurrentQueueItemAlertDialog2": "Êtes-vous sûr de vouloir annuler tous les éléments en attente dans la file d'attente ?",
    "cancelAllExceptCurrentTooltip": "Annuler tout sauf l'élément actuel",
    "confirm": "Confirmer"
    "batchSize": "Taille de lot"
  },
  "prompt": {
    "noMatchingTriggers": "Pas de déclancheurs correspondants",
@@ -1076,9 +1031,7 @@
    "controlNetWeight": {
      "heading": "Poids",
      "paragraphs": [
        "Poids du Control Adapter. Un poids plus élevé aura un impact plus important sur l'image finale.",
        "• Poids plus élevé (.75-2) : Crée un impact plus significatif sur le résultat final.",
        "• Poids inférieur (0-.75) : Crée un impact plus faible sur le résultat final."
        "Poids du Control Adapter. Un poids plus élevé aura un impact plus important sur l'image finale."
      ]
    },
    "compositingMaskAdjustments": {
@@ -1123,9 +1076,8 @@
    "controlNetBeginEnd": {
      "heading": "Pourcentage de début / de fin d'étape",
      "paragraphs": [
        "Ce paramètre détérmine quelle portion du processus de débruitage (génération) utilisera cette couche comme guide.",
        "En général, les Control Adapter appliqués au début du processus guident la composition, tandis que les Control Adapter appliqués à la fin guident les détails.",
        "• Étape de fin (%): Spécifie quand arrêter d'appliquer le guide de cette couche et revenir aux guides généraux du modèle et aux autres paramètres."
        "La partie du processus de débruitage à laquelle le Control Adapter sera appliqué.",
        "En général, les Control Adapter appliqués au début du processus guident la composition, tandis que les Control Adapter appliqués à la fin guident les détails."
      ]
    },
    "controlNetControlMode": {
@@ -1490,8 +1442,7 @@
    "showDynamicPrompts": "Afficher les Prompts dynamiques",
    "dynamicPrompts": "Prompts Dynamiques",
    "promptsPreview": "Prévisualisation des Prompts",
    "loading": "Génération des Pompts Dynamiques...",
    "promptsToGenerate": "Prompts à générer"
    "loading": "Génération des Pompts Dynamiques..."
  },
  "metadata": {
    "positivePrompt": "Prompt Positif",
@@ -1702,22 +1653,7 @@
    "internalDesc": "Cette invocation est utilisée internalement par Invoke. En fonction des mises à jours il est possible que des changements y soit effectués ou qu'elle soit supprimé sans prévention.",
|
||||
"splitOn": "Diviser sur",
|
||||
"generatorNoValues": "vide",
|
||||
"addItem": "Ajouter un élément",
|
||||
"specialDesc": "Cette invocation nécessite un traitement spécial dans l'application. Par exemple, les nœuds Batch sont utilisés pour mettre en file d'attente plusieurs graphes à partir d'un seul workflow.",
|
||||
"unableToUpdateNode": "La mise à jour du nœud a échoué : nœud {{node}} de type {{type}} (peut nécessiter la suppression et la recréation).",
|
||||
"deletedMissingNodeFieldFormElement": "Champ de formulaire manquant supprimé : nœud {{nodeId}} champ {{fieldName}}",
|
||||
"nodeName": "Nom du nœud",
|
||||
"description": "Description",
|
||||
"loadWorkflowDesc": "Charger le workflow ?",
|
||||
"missingSourceOrTargetNode": "Nœud source ou cible manquant",
|
||||
"generatorImagesCategory": "Catégorie",
|
||||
"generatorImagesFromBoard": "Images de la Planche",
|
||||
"missingSourceOrTargetHandle": "Manque de gestionnaire source ou cible",
|
||||
"loadingTemplates": "Chargement de {{name}}",
|
||||
"loadWorkflowDesc2": "Votre workflow actuel contient des modifications non enregistrées.",
|
||||
"generatorImages_one": "{{count}} image",
|
||||
"generatorImages_many": "{{count}} images",
|
||||
"generatorImages_other": "{{count}} images"
|
||||
"addItem": "Ajouter un élément"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Aucun modèle correspondant",
|
||||
@@ -1776,41 +1712,13 @@
|
||||
"deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce Workflow ? Cette action ne peut pas être annulé.",
|
||||
"download": "Télécharger",
|
||||
"copyShareLinkForWorkflow": "Copier le lien de partage pour le Workflow",
|
||||
"delete": "Supprimer",
|
||||
"builder": {
|
||||
"component": "Composant",
|
||||
"numberInput": "Entrée de nombre",
|
||||
"slider": "Curseur",
|
||||
"both": "Les deux",
|
||||
"singleLine": "Ligne unique",
|
||||
"multiLine": "Multi Ligne",
|
||||
"headingPlaceholder": "En-tête vide",
|
||||
"emptyRootPlaceholderEditMode": "Faites glisser un élément de formulaire ou un champ de nœud ici pour commencer.",
|
||||
"emptyRootPlaceholderViewMode": "Cliquez sur Modifier pour commencer à créer un formulaire pour ce workflow.",
|
||||
"containerPlaceholder": "Conteneur Vide",
|
||||
"row": "Ligne",
|
||||
"column": "Colonne",
|
||||
"layout": "Mise en page",
|
||||
"nodeField": "Champ de nœud",
|
||||
"zoomToNode": "Zoomer sur le nœud",
|
||||
"nodeFieldTooltip": "Pour ajouter un champ de nœud, cliquez sur le petit bouton plus sur le champ dans l'Éditeur de Workflow, ou faites glisser le champ par son nom dans le formulaire.",
|
||||
"addToForm": "Ajouter au formulaire",
|
||||
"label": "Étiquette",
|
||||
"textPlaceholder": "Texte vide",
|
||||
"builder": "Constructeur de Formulaire",
|
||||
"resetAllNodeFields": "Réinitialiser tous les champs de nœud",
|
||||
"deleteAllElements": "Supprimer tous les éléments de formulaire",
|
||||
"workflowBuilderAlphaWarning": "Le constructeur de workflow est actuellement en version alpha. Il peut y avoir des changements majeurs avant la version stable.",
|
||||
"showDescription": "Afficher la description"
|
||||
},
|
||||
"openLibrary": "Ouvrir la Bibliothèque"
|
||||
"delete": "Supprimer"
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "Quoi de neuf dans Invoke",
|
||||
"watchRecentReleaseVideos": "Regarder les vidéos des dernières versions",
|
||||
"items": [
|
||||
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux.",
|
||||
"Autres améliorations : mise en file d'attente par lots plus rapide, meilleur redimensionnement, sélecteur de couleurs amélioré et nœuds de métadonnées."
|
||||
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux."
|
||||
],
|
||||
"readReleaseNotes": "Notes de version",
|
||||
"watchUiUpdatesOverview": "Aperçu des mises à jour de l'interface utilisateur"
|
||||
@@ -1924,49 +1832,7 @@
|
||||
"cancel": "Annuler",
|
||||
"advanced": "Avancé",
|
||||
"processingLayerWith": "Calque de traitement avec le filtre {{type}}.",
|
||||
"forMoreControl": "Pour plus de contrôle, cliquez sur Avancé ci-dessous.",
|
||||
"adjust_image": {
|
||||
"b": "B (LAB)",
|
||||
"blue": "Bleu (RGBA)",
|
||||
"alpha": "Alpha (RGBA)",
|
||||
"magenta": "Magenta (CMJN)",
|
||||
"yellow": "Jaune (CMJN)",
|
||||
"cb": "Cb (YCbCr)",
|
||||
"cr": "Cr (YCbCr)",
|
||||
"cyan": "Cyan (CMJN)",
|
||||
"label": "Ajuster l'image",
|
||||
"description": "Ajuste le canal sélectionné d'une image.",
|
||||
"channel": "Canal",
|
||||
"value_setting": "Valeur",
|
||||
"scale_values": "Valeurs d'échelle",
|
||||
"red": "Rouge (RGBA)",
|
||||
"green": "Vert (RGBA)",
|
||||
"black": "Noir (CMJN)",
|
||||
"hue": "Teinte (HSV)",
|
||||
"saturation": "Saturation (HSV)",
|
||||
"value": "Valeur (HSV)",
|
||||
"luminosity": "Luminosité (LAB)",
|
||||
"a": "A (LAB)",
|
||||
"y": "Y (YCbCr)"
|
||||
},
|
||||
"img_blur": {
|
||||
"label": "Flou de l'image",
|
||||
"blur_type": "Type de flou",
|
||||
"box_type": "Boîte",
|
||||
"description": "Floute la couche sélectionnée.",
|
||||
"blur_radius": "Rayon",
|
||||
"gaussian_type": "Gaussien"
|
||||
},
|
||||
"img_noise": {
|
||||
"label": "Image de bruit",
|
||||
"description": "Ajoute du bruit à la couche sélectionnée.",
|
||||
"gaussian_type": "Gaussien",
|
||||
"size": "Taille du bruit",
|
||||
"noise_amount": "Quantité",
|
||||
"noise_type": "Type de bruit",
|
||||
"salt_and_pepper_type": "Sel et Poivre",
|
||||
"noise_color": "Bruit coloré"
|
||||
}
|
||||
"forMoreControl": "Pour plus de contrôle, cliquez sur Avancé ci-dessous."
|
||||
},
|
||||
"canvasContextMenu": {
|
||||
"saveToGalleryGroup": "Enregistrer dans la galerie",
|
||||
@@ -1980,10 +1846,7 @@
|
||||
"newGlobalReferenceImage": "Nouvelle image de référence globale",
|
||||
"newControlLayer": "Nouveau couche de contrôle",
|
||||
"newInpaintMask": "Nouveau Masque Inpaint",
|
||||
"newRegionalGuidance": "Nouveau Guide Régional",
|
||||
"copyToClipboard": "Copier dans le presse-papiers",
|
||||
"copyBboxToClipboard": "Copier Bbox dans le presse-papiers",
|
||||
"copyCanvasToClipboard": "Copier la Toile dans le presse-papiers"
|
||||
"newRegionalGuidance": "Nouveau Guide Régional"
|
||||
},
|
||||
"bookmark": "Marque-page pour Changement Rapide",
|
||||
"saveLayerToAssets": "Enregistrer la couche dans les ressources",
|
||||
@@ -2149,10 +2012,7 @@
|
||||
"ipAdapterMethod": "Méthode d'IP Adapter",
|
||||
"full": "Complet",
|
||||
"style": "Style uniquement",
|
||||
"composition": "Composition uniquement",
|
||||
"fullDesc": "Applique le style visuel (couleurs, textures) et la composition (mise en page, structure).",
|
||||
"styleDesc": "Applique un style visuel (couleurs, textures) sans tenir compte de sa mise en page.",
|
||||
"compositionDesc": "Réplique la mise en page et la structure tout en ignorant le style de la référence."
|
||||
"composition": "Composition uniquement"
|
||||
},
|
||||
"fitBboxToLayers": "Ajuster la bounding box aux calques",
|
||||
"regionIsEmpty": "La zone sélectionnée est vide",
|
||||
@@ -2235,40 +2095,7 @@
|
||||
"asRasterLayerResize": "En tant que $t(controlLayers.rasterLayer) (Redimensionner)",
|
||||
"asControlLayer": "En tant que $t(controlLayers.controlLayer)",
|
||||
"asControlLayerResize": "En $t(controlLayers.controlLayer) (Redimensionner)",
|
||||
"newSession": "Nouvelle session",
|
||||
"warnings": {
|
||||
"controlAdapterIncompatibleBaseModel": "modèle de base de la couche de contrôle incompatible",
|
||||
"controlAdapterNoControl": "aucun contrôle sélectionné/dessiné",
|
||||
"rgNoPromptsOrIPAdapters": "pas de textes d'instructions ni d'images de référence",
|
||||
"rgAutoNegativeNotSupported": "Auto-négatif non pris en charge pour le modèle de base sélectionné",
|
||||
"rgNoRegion": "aucune région dessinée",
|
||||
"ipAdapterNoModelSelected": "aucun modèle d'image de référence sélectionné",
|
||||
"rgReferenceImagesNotSupported": "Les images de référence régionales ne sont pas prises en charge pour le modèle de base sélectionné",
|
||||
"problemsFound": "Problèmes trouvés",
|
||||
"unsupportedModel": "couche non prise en charge pour le modèle de base sélectionné",
|
||||
"rgNegativePromptNotSupported": "Prompt négatif non pris en charge pour le modèle de base sélectionné",
|
||||
"ipAdapterIncompatibleBaseModel": "modèle de base d'image de référence incompatible",
|
||||
"controlAdapterNoModelSelected": "aucun modèle de couche de contrôle sélectionné",
|
||||
"ipAdapterNoImageSelected": "Aucune image de référence sélectionnée."
|
||||
},
|
||||
"pasteTo": "Coller vers",
|
||||
"pasteToAssets": "Ressources",
|
||||
"pasteToAssetsDesc": "Coller dans les ressources",
|
||||
"pasteToBbox": "Bbox",
|
||||
"regionCopiedToClipboard": "{{region}} Copié dans le presse-papiers",
|
||||
"copyRegionError": "Erreur de copie {{region}}",
|
||||
"pasteToCanvas": "Toile",
|
||||
"errors": {
|
||||
"unableToFindImage": "Impossible de trouver l'image",
|
||||
"unableToLoadImage": "Impossible de charger l'image"
|
||||
},
|
||||
"referenceImageRegional": "Image de référence (régionale)",
|
||||
"pasteToBboxDesc": "Nouvelle couche (dans Bbox)",
|
||||
"pasteToCanvasDesc": "Nouvelle couche (dans la Toile)",
|
||||
"useImage": "Utiliser l'image",
|
||||
"pastedTo": "Collé à {{destination}}",
|
||||
"referenceImageEmptyState": "<UploadButton>Séléctionner une image</UploadButton> ou faites glisser une image depuis la <GalleryButton>galerie</GalleryButton> sur cette couche pour commencer.",
|
||||
"referenceImageGlobal": "Image de référence (Globale)"
|
||||
"newSession": "Nouvelle session"
|
||||
},
|
||||
"upscaling": {
|
||||
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
|
||||
@@ -2348,8 +2175,7 @@
|
||||
"queue": "File d'attente",
|
||||
"events": "Événements",
|
||||
"metadata": "Métadonnées",
|
||||
"gallery": "Galerie",
|
||||
"dnd": "Glisser et déposer"
|
||||
"gallery": "Galerie"
|
||||
},
|
||||
"logLevel": {
|
||||
"trace": "Trace",
|
||||
@@ -2366,8 +2192,7 @@
|
||||
"toGetStarted": "Pour commencer, saisissez un prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement dans la <StrongComponent>Galerie</StrongComponent> ou de les modifier sur la <StrongComponent>Toile</StrongComponent>.",
|
||||
"gettingStartedSeries": "Vous souhaitez plus de conseils ? Consultez notre <LinkComponent>Série de démarrage</LinkComponent> pour des astuces sur l'exploitation du plein potentiel de l'Invoke Studio.",
|
||||
"noModelsInstalled": "Il semble qu'aucun modèle ne soit installé",
|
||||
"toGetStartedLocal": "Pour commencer, assurez-vous de télécharger ou d'importer des modèles nécessaires pour exécuter Invoke. Ensuite, saisissez le prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement sur <StrongComponent>Galerie</StrongComponent> ou les modifier sur la <StrongComponent>Toile</StrongComponent>.",
|
||||
"lowVRAMMode": "Pour de meilleures performances, suivez notre <LinkComponent>guide Low VRAM</LinkComponent>."
|
||||
"toGetStartedLocal": "Pour commencer, assurez-vous de télécharger ou d'importer des modèles nécessaires pour exécuter Invoke. Ensuite, saisissez le prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement sur <StrongComponent>Galerie</StrongComponent> ou les modifier sur la <StrongComponent>Toile</StrongComponent>."
|
||||
},
|
||||
"upsell": {
|
||||
"shareAccess": "Partager l'accès",
|
||||
@@ -2415,8 +2240,7 @@
|
||||
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Comment utiliser les masques d'inpainting ?",
|
||||
"description": "Comment appliquer des masques de retourche pour la correction et la variation d'image."
|
||||
"title": "Comment utiliser les masques d'inpainting ?"
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"title": "Créer votre première image",
|
||||
@@ -2436,10 +2260,5 @@
|
||||
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
|
||||
"supportVideos": "Vidéos d'assistance",
|
||||
"controlCanvas": "Contrôler la toile"
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Effacer le cache du modèle",
|
||||
"clearSucceeded": "Cache du modèle effacée",
|
||||
"clearFailed": "Problème de nettoyage du cache du modèle"
|
||||
}
|
||||
}
|
||||
|
||||
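Editor's note: the locale hunks above (and the Italian, Japanese, and Vietnamese ones that follow) rely on three i18next conventions visible throughout: $t(key) nesting, {{placeholder}} interpolation, and _one/_many/_other plural suffixes selected via Intl.PluralRules. The following TypeScript sketch is an illustration only, not part of the diff; the resource values are abbreviated samples, and the "uncategorized" value is an assumed stand-in.

import i18next from 'i18next';

await i18next.init({
  lng: 'fr',
  resources: {
    fr: {
      translation: {
        boards: { uncategorized: 'Non classé' }, // assumed sample value, not taken from this diff
        toast: {
          // $t(...) nests another key; {{destination}} is interpolated at call time
          addedToUncategorized: 'Ajouté aux ressources de la planche $t(boards.uncategorized)',
          pasteSuccess: 'Collé à {{destination}}',
        },
        nodes: {
          // the suffix (_one/_many/_other) is chosen from the locale's plural rules
          generatorImages_one: '{{count}} image',
          generatorImages_many: '{{count}} images',
          generatorImages_other: '{{count}} images',
        },
      },
    },
  },
});

i18next.t('toast.addedToUncategorized'); // -> "Ajouté aux ressources de la planche Non classé"
i18next.t('toast.pasteSuccess', { destination: 'Toile' }); // -> "Collé à Toile"
i18next.t('nodes.generatorImages', { count: 2 }); // -> "2 images"
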
@@ -105,11 +105,7 @@
"resetToDefaults": "Ripristina le impostazioni predefinite",
"seed": "Seme",
"combinatorial": "Combinatorio",
"count": "Quantità",
"board": "Bacheca",
"layout": "Schema",
"row": "Riga",
"column": "Colonna"
"count": "Quantità"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -177,8 +173,7 @@
"assetsTab": "File che hai caricato per usarli nei tuoi progetti.",
"boardsSettings": "Impostazioni Bacheche",
"imagesSettings": "Impostazioni Immagini Galleria",
"assets": "Risorse",
"images": "Immagini"
"assets": "Risorse"
},
"hotkeys": {
"searchHotkeys": "Cerca tasti di scelta rapida",
@@ -708,8 +703,7 @@
"collectionNumberNotMultipleOf": "{{value}} non è multiplo di {{multipleOf}}",
"collectionNumberLTMin": "{{value}} < {{minimum}} (incr min)",
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (excl max)",
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (excl min)",
"collectionEmpty": "raccolta vuota"
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (excl min)"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",
@@ -926,8 +920,8 @@
"boolean": "Booleani",
"node": "Nodo",
"collection": "Raccolta",
"cannotConnectInputToInput": "Impossibile collegare ingresso a ingresso",
"cannotConnectOutputToOutput": "Impossibile collegare uscita ad uscita",
"cannotConnectInputToInput": "Impossibile collegare Input a Input",
"cannotConnectOutputToOutput": "Impossibile collegare Output ad Output",
"cannotConnectToSelf": "Impossibile connettersi a se stesso",
"mismatchedVersion": "Nodo non valido: il nodo {{node}} di tipo {{type}} ha una versione non corrispondente (provare ad aggiornare?)",
"loadingNodes": "Caricamento nodi...",
@@ -1020,21 +1014,7 @@
"generatorNRandomValues_one": "{{count}} valore casuale",
"generatorNRandomValues_many": "{{count}} valori casuali",
"generatorNRandomValues_other": "{{count}} valori casuali",
"arithmeticSequence": "Sequenza aritmetica",
"nodeName": "Nome del nodo",
"loadWorkflowDesc": "Caricare il flusso di lavoro?",
"loadWorkflowDesc2": "Il flusso di lavoro corrente presenta modifiche non salvate.",
"downloadWorkflowError": "Errore durante lo scaricamento del flusso di lavoro",
"deletedMissingNodeFieldFormElement": "Campo modulo mancante eliminato: nodo {{nodeId}} campo {{fieldName}}",
"loadingTemplates": "Caricamento {{name}}",
"unableToUpdateNode": "Aggiornamento del nodo non riuscito: nodo {{node}} di tipo {{type}} (potrebbe essere necessario eliminarlo e ricrearlo)",
"description": "Descrizione",
"generatorImagesCategory": "Categoria",
"generatorImages_one": "{{count}} immagine",
"generatorImages_many": "{{count}} immagini",
"generatorImages_other": "{{count}} immagini",
"generatorImagesFromBoard": "Immagini dalla Bacheca",
"missingSourceOrTargetNode": "Nodo sorgente o di destinazione mancante"
"arithmeticSequence": "Sequenza aritmetica"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1160,10 +1140,7 @@
"cancelAllExceptCurrentQueueItemAlertDialog2": "Vuoi davvero annullare tutti gli elementi in coda in sospeso?",
"confirm": "Conferma",
"cancelAllExceptCurrentQueueItemAlertDialog": "L'annullamento di tutti gli elementi della coda, eccetto quello corrente, interromperà gli elementi in sospeso ma consentirà il completamento di quello in corso.",
"cancelAllExceptCurrentTooltip": "Annulla tutto tranne l'elemento corrente",
"retrySucceeded": "Elemento rieseguito",
"retryItem": "Riesegui elemento",
"retryFailed": "Problema riesecuzione elemento"
"cancelAllExceptCurrentTooltip": "Annulla tutto tranne l'elemento corrente"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1741,38 +1718,7 @@
"download": "Scarica",
"copyShareLink": "Copia Condividi Link",
"copyShareLinkForWorkflow": "Copia Condividi Link del Flusso di lavoro",
"delete": "Elimina",
"openLibrary": "Apri la libreria",
"builder": {
"resetAllNodeFields": "Reimposta tutti i campi del nodo",
"row": "Riga",
"nodeField": "Campo del nodo",
"slider": "Cursore",
"emptyRootPlaceholderEditMode": "Per iniziare, trascina qui un elemento del modulo o un campo nodo.",
"containerPlaceholder": "Contenitore vuoto",
"headingPlaceholder": "Titolo vuoto",
"column": "Colonna",
"nodeFieldTooltip": "Per aggiungere un campo nodo, fare clic sul piccolo pulsante con il segno più sul campo nell'editor del flusso di lavoro, oppure trascinare il campo in base al suo nome nel modulo.",
"label": "Etichetta",
"deleteAllElements": "Elimina tutti gli elementi del modulo",
"addToForm": "Aggiungi al Modulo",
"layout": "Schema",
"builder": "Generatore Modulo",
"zoomToNode": "Zoom sul nodo",
"component": "Componente",
"showDescription": "Mostra Descrizione",
"singleLine": "Linea singola",
"multiLine": "Linea Multipla",
"both": "Entrambi",
"emptyRootPlaceholderViewMode": "Fare clic su Modifica per iniziare a creare un modulo per questo flusso di lavoro.",
"textPlaceholder": "Testo vuoto",
"workflowBuilderAlphaWarning": "Il generatore di flussi di lavoro è attualmente in versione alpha. Potrebbero esserci cambiamenti radicali prima della versione stabile.",
"heading": "Intestazione",
"divider": "Divisore",
"container": "Contenitore",
"text": "Testo",
"numberInput": "Ingresso numerico"
}
"delete": "Elimina"
},
"accordions": {
"compositing": {
@@ -2194,7 +2140,7 @@
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
@@ -2330,8 +2276,12 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Editor del flusso di lavoro: nuovo generatore di moduli trascina-e-rilascia per una creazione più facile del flusso di lavoro.",
"Altri miglioramenti: messa in coda dei lotti più rapida, migliore ampliamento, selettore colore migliorato e nodi metadati."
"Impostazioni predefinite VRAM migliorate",
"Cancellazione della cache del modello su richiesta",
"Compatibilità estesa FLUX LoRA",
"Filtro Regola Immagine su Tela",
"Annulla tutto tranne l'elemento della coda corrente",
"Copia da e incolla sulla Tela"
]
},
"system": {

@@ -32,29 +32,29 @@
"learnMore": "もっと学ぶ",
"random": "ランダム",
"batch": "バッチマネージャー",
"advanced": "高度",
"advanced": "高度な設定",
"created": "作成済",
"green": "緑",
"blue": "青",
"alpha": "アルファ",
"outpaint": "outpaint",
"outpaint": "アウトペイント",
"unknown": "不明",
"updated": "更新済",
"add": "追加",
"ai": "ai",
"ai": "AI",
"copyError": "$t(gallery.copy) エラー",
"data": "データ",
"template": "テンプレート",
"red": "赤",
"or": "または",
"checkpoint": "Checkpoint",
"checkpoint": "チェックポイント",
"direction": "方向",
"simple": "シンプル",
"save": "保存",
"saveAs": "名前をつけて保存",
"somethingWentWrong": "何かの問題が発生しました",
"details": "詳細",
"inpaint": "inpaint",
"inpaint": "インペイント",
"delete": "削除",
"nextPage": "次のページ",
"copy": "コピー",
@@ -70,12 +70,12 @@
"unknownError": "未知のエラー",
"orderBy": "並び順:",
"enabled": "有効",
"notInstalled": "未 $t(common.installed)",
"notInstalled": "未インストール",
"positivePrompt": "ポジティブプロンプト",
"negativePrompt": "ネガティブプロンプト",
"selected": "選択済み",
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
"beta": "Beta",
"beta": "ベータ",
"disabled": "無効",
"editor": "エディタ",
"safetensors": "Safetensors",
@@ -93,27 +93,7 @@
"reset": "リセット",
"none": "なし",
"new": "新規",
"close": "閉じる",
"warnings": "警告",
"dontShowMeThese": "次回から表示しない",
"goTo": "移動",
"generating": "生成中",
"loadingModel": "モデルをロード中",
"layout": "レイアウト",
"step": "ステップ",
"start": "開始",
"count": "回数",
"end": "終了",
"min": "最小",
"max": "最大",
"values": "値",
"resetToDefaults": "デフォルトに戻す",
"row": "行",
"column": "列",
"board": "ボード",
"seed": "シード",
"combinatorial": "組み合わせ",
"aboutHeading": "想像力をこの手に"
"close": "閉じる"
},
"gallery": {
"galleryImageSize": "画像のサイズ",
@@ -129,7 +109,7 @@
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
"drop": "ドロップ",
"dropOrUpload": "$t(gallery.drop) またはアップロード",
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImage_other": "画像を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
"unableToLoad": "ギャラリーをロードできません",
@@ -175,12 +155,7 @@
"displayBoardSearch": "ボード検索",
"displaySearch": "画像を検索",
"boardsSettings": "ボード設定",
"imagesSettings": "ギャラリー画像設定",
"selectAllOnPage": "ページ上のすべてを選択",
"images": "画像",
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
"imagesTab": "Invoke内で作成および保存された画像。",
"assets": "アセット"
"imagesSettings": "ギャラリー画像設定"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
@@ -205,121 +180,44 @@
},
"canvas": {
"redo": {
"title": "やり直し",
"desc": "最後のキャンバス操作をやり直します。"
"title": "やり直し"
},
"transformSelected": {
"title": "変形",
"desc": "選択したレイヤーを変形します。"
"title": "変形"
},
"undo": {
"title": "取り消し",
"desc": "最後のキャンバス操作を取り消します。"
"title": "取り消し"
},
"selectEraserTool": {
"title": "消しゴムツール",
"desc": "消しゴムツールを選択します。"
"title": "消しゴムツール"
},
"cancelTransform": {
"title": "変形をキャンセル",
"desc": "保留中の変形をキャンセルします。"
"title": "変形をキャンセル"
},
"resetSelected": {
"title": "レイヤーをリセット",
"desc": "選択したレイヤーをリセットします。この操作はInpaint MaskおよびRegional Guidanceにのみ適用されます。"
"title": "レイヤーをリセット"
},
"applyTransform": {
"title": "変形を適用",
"desc": "保留中の変形を選択したレイヤーに適用します。"
"title": "変形を適用"
},
"selectColorPickerTool": {
"title": "スポイトツール",
"desc": "スポイトツールを選択します。"
"title": "スポイトツール"
},
"fitBboxToCanvas": {
"title": "バウンディングボックスをキャンバスにフィット",
"desc": "バウンディングボックスがキャンバスに収まるように表示を拡大、位置調整します。"
"title": "バウンディングボックスをキャンバスにフィット"
},
"selectBrushTool": {
"title": "ブラシツール",
"desc": "ブラシツールを選択します。"
"title": "ブラシツール"
},
"selectMoveTool": {
"title": "移動ツール",
"desc": "移動ツールを選択します。"
"title": "移動ツール"
},
"selectBboxTool": {
"title": "バウンディングボックスツール",
"desc": "バウンディングボックスツールを選択します。"
"title": "バウンディングボックスツール"
},
"title": "キャンバス",
"fitLayersToCanvas": {
"title": "レイヤーをキャンバスにフィット",
"desc": "すべての表示レイヤーがキャンバスに収まるように表示を拡大、位置調整します。"
},
"setZoomTo400Percent": {
"desc": "キャンバスのズームを400%に設定します。",
"title": "400%にズーム"
},
"setZoomTo800Percent": {
"title": "800%にズーム",
"desc": "キャンバスのズームを800%に設定します。"
},
"quickSwitch": {
"title": "レイヤーのクイックスイッチ",
"desc": "最後に選択した2つのレイヤー間を切り替えます。レイヤーがブックマークされている場合、常にそのレイヤーと最後に選択したブックマークされていないレイヤーの間を切り替えます。"
},
"nextEntity": {
"title": "次のレイヤー",
"desc": "リスト内の次のレイヤーを選択します。"
},
"filterSelected": {
"title": "フィルター",
"desc": "選択したレイヤーをフィルターします。RasterおよびControlレイヤーにのみ適用されます。"
},
"prevEntity": {
"desc": "リスト内の前のレイヤーを選択します。",
"title": "前のレイヤー"
},
"setFillToWhite": {
"title": "ツール色を白に設定",
"desc": "現在のツールの色を白色に設定します。"
},
"selectViewTool": {
"title": "表示ツール",
"desc": "表示ツールを選択します。"
},
"setZoomTo100Percent": {
"title": "100%にズーム",
"desc": "キャンバスのズームを100%に設定します。"
},
"deleteSelected": {
"desc": "選択したレイヤーを削除します。",
"title": "レイヤーを削除"
},
"cancelFilter": {
"desc": "保留中のフィルターをキャンセルします。",
"title": "フィルターをキャンセル"
},
"applyFilter": {
"title": "フィルターを適用",
"desc": "保留中のフィルターを選択したレイヤーに適用します。"
},
"setZoomTo200Percent": {
"title": "200%にズーム",
"desc": "キャンバスのズームを200%に設定します。"
},
"decrementToolWidth": {
"title": "ツール幅を縮小する",
"desc": "選択中のブラシまたは消しゴムツールの幅を減少させます。"
},
"incrementToolWidth": {
"desc": "選択中のブラシまたは消しゴムツールの幅を増加させます。",
"title": "ツール幅を増加する"
},
"selectRectTool": {
"title": "矩形ツール",
"desc": "矩形ツールを選択します。"
"title": "レイヤーをキャンバスにフィット"
}
},
"workflows": {
@@ -328,13 +226,6 @@
},
"redo": {
"title": "やり直し"
},
"title": "ワークフロー",
"pasteSelection": {
"title": "ペースト"
},
"copySelection": {
"title": "コピー"
}
},
"app": {
@@ -344,62 +235,16 @@
},
"title": "アプリケーション",
"invoke": {
"title": "生成",
"desc": "生成をキューに追加し、キューの末尾に加えます。"
"title": "Invoke"
},
"cancelQueueItem": {
"title": "キャンセル",
"desc": "現在処理中のキュー項目をキャンセルします。"
"title": "キャンセル"
},
"clearQueue": {
"title": "キューをクリア",
"desc": "すべてのキュー項目をキャンセルして消去します。"
},
"selectCanvasTab": {
"desc": "キャンバスタブを選択します。",
"title": "キャンバスタブを選択"
},
"selectUpscalingTab": {
"desc": "アップスケーリングタブを選択します。",
"title": "アップスケーリングタブを選択"
},
"toggleRightPanel": {
"desc": "右パネルを表示または非表示。",
"title": "右パネルをトグル"
},
"selectModelsTab": {
"title": "モデルタブを選択",
"desc": "モデルタブを選択します。"
},
"invokeFront": {
"desc": "生成をキューに追加し、キューの先頭に加えます。",
"title": "生成(先頭)"
},
"resetPanelLayout": {
"title": "パネルレイアウトをリセット",
"desc": "左パネルと右パネルをデフォルトのサイズとレイアウトにリセットします。"
},
"togglePanels": {
"desc": "左パネルと右パネルを合わせて表示または非表示。",
"title": "パネルをトグル"
},
"selectWorkflowsTab": {
"desc": "ワークフロータブを選択します。",
"title": "ワークフロータブを選択"
},
"selectQueueTab": {
"title": "キュータブを選択",
"desc": "キュータブを選択します。"
},
"focusPrompt": {
"title": "プロンプトにフォーカス",
"desc": "カーソルをポジティブプロンプト欄に移動します。"
"title": "キューをクリア"
}
},
"hotkeys": "ホットキー",
"gallery": {
"title": "ギャラリー"
}
"hotkeys": "ホットキー"
},
"modelManager": {
"modelManager": "モデルマネージャ",
@@ -410,13 +255,13 @@
"name": "名前",
"description": "概要",
"config": "コンフィグ",
"repo_id": "リポジトリID",
"repo_id": "Repo ID",
"width": "幅",
"height": "高さ",
"addModel": "モデルを追加",
"availableModels": "モデルを有効化",
"search": "検索",
"load": "ロード",
"load": "Load",
"active": "active",
"selected": "選択済",
"delete": "削除",
@@ -436,7 +281,7 @@
"modelConverted": "モデル変換が完了しました",
"predictionType": "予測タイプ(SD 2.x モデルおよび一部のSD 1.x モデル用)",
"selectModel": "モデルを選択",
"advanced": "高度",
"advanced": "高度な設定",
"modelDeleted": "モデルが削除されました",
"convertToDiffusersHelpText2": "このプロセスでは、モデルマネージャーのエントリーを同じモデルのディフューザーバージョンに置き換えます。",
"modelUpdateFailed": "モデル更新が失敗しました",
@@ -449,20 +294,7 @@
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
"cancel": "キャンセル",
"uploadImage": "画像をアップロード",
"addModels": "モデルを追加",
"modelName": "モデル名",
"source": "ソース",
"path": "パス",
"modelSettings": "モデル設定",
"vae": "VAE",
"huggingFace": "HuggingFace",
"huggingFaceRepoID": "HuggingFace リポジトリID",
"metadata": "メタデータ",
"loraModels": "LoRA",
"edit": "編集",
"install": "インストール",
"huggingFacePlaceholder": "owner/model-name",
"variant": "Variant"
"addModels": "モデルを追加"
},
"parameters": {
"images": "画像",
@@ -473,7 +305,7 @@
"shuffle": "シャッフル",
"strength": "強度",
"upscaling": "アップスケーリング",
"scale": "スケール",
"scale": "Scale",
"scaleBeforeProcessing": "処理前のスケール",
"scaledWidth": "幅のスケール",
"scaledHeight": "高さのスケール",
@@ -482,7 +314,7 @@
"useSeed": "シード値を使用",
"useAll": "すべてを使用",
"info": "情報",
"showOptionsPanel": "サイドパネルを表示 (O or T)",
"showOptionsPanel": "オプションパネルを表示",
"iterations": "生成回数",
"general": "基本設定",
"setToOptimalSize": "サイズをモデルに最適化",
@@ -496,29 +328,16 @@
"useSize": "サイズを使用",
"postProcessing": "ポストプロセス (Shift + U)",
"denoisingStrength": "ノイズ除去強度",
"recallMetadata": "メタデータを再使用",
"copyImage": "画像をコピー",
"positivePromptPlaceholder": "ポジティブプロンプト",
"negativePromptPlaceholder": "ネガティブプロンプト",
"type": "タイプ",
"cancel": {
"cancel": "キャンセル"
},
"cfgScale": "CFGスケール",
"tileSize": "タイルサイズ",
"coherenceMode": "モード"
"recallMetadata": "メタデータを再使用"
},
"settings": {
"models": "モデル",
"displayInProgress": "生成中の画像を表示",
"displayInProgress": "生成中の画像を表示する",
"confirmOnDelete": "削除時に確認",
"resetWebUI": "WebUIをリセット",
"resetWebUIDesc1": "WebUIのリセットは、画像と保存された設定のキャッシュをリセットするだけです。画像を削除するわけではありません。",
"resetWebUIDesc2": "もしギャラリーに画像が表示されないなど、何か問題が発生した場合はGitHubにissueを提出する前にリセットを試してください。",
"resetComplete": "WebUIはリセットされました。",
"ui": "ユーザーインターフェイス",
"beta": "ベータ",
"developer": "開発者"
"resetComplete": "WebUIはリセットされました。F5を押して再読み込みしてください。"
},
"toast": {
"uploadFailed": "アップロード失敗",
@@ -526,8 +345,7 @@
"imageUploadFailed": "画像のアップロードに失敗しました",
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
"sentToUpscale": "アップスケーラーに転送しました",
"imageUploaded": "画像をアップロードしました",
"serverError": "サーバーエラー"
"imageUploaded": "画像をアップロードしました"
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -538,7 +356,7 @@
"menu": "メニュー",
"createIssue": "問題を報告",
"resetUI": "$t(accessibility.reset) UI",
"mode": "モード",
"mode": "モード:",
"about": "Invoke について",
"submitSupportTicket": "サポート依頼を送信する",
"uploadImages": "画像をアップロード",
@@ -555,20 +373,7 @@
"positivePrompt": "ポジティブプロンプト",
"strength": "Image to Image 強度",
"recallParameters": "パラメータを再使用",
"recallParameter": "{{label}} を再使用",
"imageDimensions": "画像サイズ",
"imageDetails": "画像の詳細",
"model": "モデル",
"allPrompts": "すべてのプロンプト",
"cfgScale": "CFGスケール",
"createdBy": "作成:",
"metadata": "メタデータ",
"height": "高さ",
"negativePrompt": "ネガティブプロンプト",
"generationMode": "生成モード",
"vae": "VAE",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"canvasV2Metadata": "キャンバス"
"recallParameter": "{{label}} を再使用"
},
"queue": {
"queueEmpty": "キューが空です",
@@ -600,7 +405,7 @@
"batchQueuedDesc_other": "{{count}} セッションをキューの{{direction}}に追加しました",
"graphQueued": "グラフをキューに追加しました",
"batch": "バッチ",
"clearQueueAlertDialog": "キューをクリアすると、処理中の項目は直ちにキャンセルされ、キューは完全にクリアされます。保留中のフィルターもキャンセルされます。",
"clearQueueAlertDialog": "キューをクリアすると、処理中のアイテムは直ちにキャンセルされ、キューは完全にクリアされます。",
"pending": "保留中",
"resumeFailed": "処理の再開に問題があります",
"clear": "クリア",
@@ -618,7 +423,7 @@
"enqueueing": "バッチをキューに追加",
"cancelBatchFailed": "バッチのキャンセルに問題があります",
"clearQueueAlertDialog2": "キューをクリアしてもよろしいですか?",
"item": "項目",
"item": "アイテム",
"graphFailedToQueue": "グラフをキューに追加できませんでした",
"batchFieldValues": "バッチの詳細",
"openQueue": "キューを開く",
@@ -634,17 +439,7 @@
"upscaling": "アップスケール",
"generation": "生成",
"other": "その他",
"gallery": "ギャラリー",
"cancelAllExceptCurrentQueueItemAlertDialog2": "すべての保留中のキュー項目をキャンセルしてもよいですか?",
"cancelAllExceptCurrentTooltip": "現在の項目を除いてすべてキャンセル",
"origin": "先頭",
"destination": "宛先",
"confirm": "確認",
"retryItem": "項目をリトライ",
"batchSize": "バッチサイズ",
"retryFailed": "項目のリトライに問題があります",
"cancelAllExceptCurrentQueueItemAlertDialog": "現在の項目を除くすべてのキュー項目をキャンセルすると、保留中の項目は停止しますが、進行中の項目は完了します。",
"retrySucceeded": "項目がリトライされました"
"gallery": "ギャラリー"
},
"models": {
"noMatchingModels": "一致するモデルがありません",
@@ -653,14 +448,13 @@
"noModelsAvailable": "使用可能なモデルがありません",
"selectModel": "モデルを選択してください",
"concepts": "コンセプト",
"addLora": "LoRAを追加",
"lora": "LoRA"
"addLora": "LoRAを追加"
},
"nodes": {
"addNode": "ノードを追加",
"boolean": "ブーリアン",
"addNodeToolTip": "ノードを追加 (Shift+A, Space)",
"missingTemplate": "Invalid node: タイプ {{type}} のノード {{node}} にテンプレートがありません(未インストール?)",
"missingTemplate": "テンプレートが見つかりません",
"loadWorkflow": "ワークフローを読み込み",
"hideLegendNodes": "フィールドタイプの凡例を非表示",
"float": "浮動小数点",
@@ -671,7 +465,7 @@
"currentImageDescription": "ノードエディタ内の現在の画像を表示",
"downloadWorkflow": "ワークフローのJSONをダウンロード",
"fieldTypesMustMatch": "フィールドタイプが一致している必要があります",
"edge": "エッジ",
"edge": "輪郭",
"animatedEdgesHelp": "選択したエッジおよび選択したノードに接続されたエッジをアニメーション化します",
"cannotDuplicateConnection": "重複した接続は作れません",
"noWorkflow": "ワークフローがありません",
@@ -690,20 +484,7 @@
"cannotConnectToSelf": "自身のノードには接続できません",
"colorCodeEdges": "カラー-Code Edges",
"loadingNodes": "ノードを読み込み中...",
"scheduler": "スケジューラー",
"version": "バージョン",
"edit": "編集",
"nodeVersion": "ノードバージョン",
"workflowTags": "タグ",
"string": "文字列",
"workflowVersion": "バージョン",
"workflowAuthor": "作者",
"ipAdapter": "IP-Adapter",
"notes": "ノート",
"workflow": "ワークフロー",
"workflowName": "名前",
"workflowNotes": "ノート",
"enum": "Enum"
"scheduler": "スケジューラー"
},
"boards": {
"autoAddBoard": "自動追加するボード",
@@ -725,7 +506,7 @@
"deleteBoard": "ボードの削除",
"deleteBoardAndImages": "ボードと画像の削除",
"deleteBoardOnly": "ボードのみ削除",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
"hideBoards": "ボードを隠す",
"assetsWithCount_other": "{{count}} のアセット",
@@ -737,12 +518,7 @@
"archiveBoard": "ボードをアーカイブ",
"archived": "アーカイブ完了",
"unarchiveBoard": "アーカイブされていないボード",
"imagesWithCount_other": "{{count}} の画像",
"updateBoardError": "ボード更新エラー",
"selectedForAutoAdd": "自動追加に選択済み",
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
"noBoards": "{{boardType}} ボードがありません",
"viewBoards": "ボードを表示"
"imagesWithCount_other": "{{count}} の画像"
},
"invocationCache": {
"invocationCache": "呼び出しキャッシュ",
@@ -794,57 +570,6 @@
},
"paramAspect": {
"heading": "縦横比"
},
"refinerSteps": {
"heading": "ステップ"
},
"paramVAE": {
"heading": "VAE"
},
"scale": {
"heading": "スケール"
},
"refinerScheduler": {
"heading": "スケジューラー"
},
"compositingCoherenceMode": {
"heading": "モード"
},
"paramModel": {
"heading": "モデル"
},
"paramHeight": {
"heading": "高さ"
},
"paramSteps": {
"heading": "ステップ"
},
"ipAdapterMethod": {
"heading": "モード"
},
"paramSeed": {
"heading": "シード"
},
"paramIterations": {
"heading": "生成回数"
},
"controlNet": {
"heading": "ControlNet"
},
"paramWidth": {
"heading": "幅"
},
"lora": {
"heading": "LoRA"
},
"loraWeight": {
"heading": "重み"
},
"patchmatchDownScaleSize": {
"heading": "Downscale"
},
"controlNetWeight": {
"heading": "重み"
}
},
"accordions": {
@@ -854,8 +579,7 @@
"coherenceTab": "コヒーレンスパス"
},
"advanced": {
"title": "高度",
"options": "$t(accordions.advanced.title) オプション"
"title": "高度な設定"
},
"control": {
"title": "コントロール"
@@ -884,11 +608,7 @@
},
"ui": {
"tabs": {
"queue": "キュー",
"canvas": "キャンバス",
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー"
"queue": "キュー"
}
},
"controlLayers": {
@@ -903,8 +623,7 @@
"bboxGroup": "バウンディングボックスから作成",
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像",
"canvasGroup": "キャンバス"
"newRegionalReferenceImage": "新規領域参照画像"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
@@ -925,8 +644,7 @@
"brush": "ブラシ",
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム",
"bbox": "Bbox"
"eraser": "消しゴム"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -944,27 +662,7 @@
"canvas": "キャンバス",
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました",
"controlMode": {
"prompt": "プロンプト"
},
"prompt": "プロンプト",
"settings": {
"snapToGrid": {
"off": "オフ",
"on": "オン"
}
},
"filter": {
"filter": "フィルター",
"spandrel_filter": {
"model": "モデル"
},
"apply": "適用",
"reset": "リセット",
"cancel": "キャンセル"
},
"weight": "重み"
"savedToGalleryOk": "ギャラリーに保存しました"
},
"stylePresets": {
"clearTemplateSelection": "選択したテンプレートをクリア",
@@ -976,54 +674,15 @@
"createPromptTemplate": "プロンプトテンプレートを作成",
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
"searchByName": "名前で検索",
"toggleViewMode": "表示モードを切り替え",
"negativePromptColumn": "'negative_prompt'",
"preview": "プレビュー",
"nameColumn": "'name'",
"type": "タイプ",
"private": "プライベート",
"name": "名称"
"toggleViewMode": "表示モードを切り替え"
},
"upscaling": {
"upscaleModel": "アップスケールモデル",
"postProcessingModel": "ポストプロセスモデル",
"upscale": "アップスケール",
"scale": "スケール"
"upscale": "アップスケール"
},
"sdxl": {
"denoisingStrength": "ノイズ除去強度",
"scheduler": "スケジューラー",
"loading": "ロード中...",
"steps": "ステップ",
"refiner": "Refiner"
},
"modelCache": {
"clear": "モデルキャッシュを消去",
"clearSucceeded": "モデルキャッシュを消去しました",
"clearFailed": "モデルキャッシュの消去中に問題が発生"
},
"workflows": {
"workflows": "ワークフロー",
"ascending": "昇順",
"name": "名前",
"descending": "降順"
},
"system": {
"logNamespaces": {
"system": "システム",
"gallery": "ギャラリー",
"workflows": "ワークフロー",
"models": "モデル",
"canvas": "キャンバス",
"metadata": "メタデータ",
"queue": "キュー"
},
"logLevel": {
"debug": "Debug",
"info": "Info",
"error": "Error",
"fatal": "Fatal",
"warn": "Warn"
}
"scheduler": "スケジューラー"
}
}

@@ -118,8 +118,7 @@
"compareHelp2": "Nhấn <Kbd>M</Kbd> để tuần hoàn trong chế độ so sánh.",
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh"
"assets": "Tài Nguyên"
},
"common": {
"ipAdapter": "IP Adapter",
@@ -234,8 +233,7 @@
"combinatorial": "Tổ Hợp",
"column": "Cột",
"layout": "Bố Cục",
"row": "Hàng",
"board": "Bảng"
"row": "Hàng"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
@@ -875,7 +873,7 @@
"removeLinearView": "Xoá Khỏi Chế Độ Xem Tuyến Tính",
"unknownErrorValidatingWorkflow": "Lỗi không rõ khi xác thực workflow",
"unableToLoadWorkflow": "Không Thể Tải Workflow",
"workflowSettings": "Cài Đặt Biên Tập Workflow",
"workflowSettings": "Cài Đặt Trình Biên Tập Viên Workflow",
"workflowVersion": "Phiên Bản",
"unableToGetWorkflowVersion": "Không thể tìm phiên bản của lược đồ workflow",
"collection": "Đa tài nguyên",
@@ -1012,11 +1010,7 @@
"loadWorkflowDesc2": "Workflow hiện tại của bạn có những điều chỉnh chưa được lưu.",
"loadingTemplates": "Đang Tải {{name}}",
"nodeName": "Tên Node",
"unableToUpdateNode": "Cập nhật node thất bại: node {{node}} thuộc dạng {{type}} (có thể cần xóa và tạo lại)",
"downloadWorkflowError": "Lỗi tải xuống workflow",
"generatorImagesFromBoard": "Ảnh Từ Bảng",
"generatorImagesCategory": "Phân Loại",
"generatorImages_other": "{{count}} ảnh"
"unableToUpdateNode": "Cập nhật node thất bại: node {{node}} thuộc dạng {{type}} (có thể cần xóa và tạo lại)"
},
"popovers": {
"paramCFGRescaleMultiplier": {
@@ -1500,9 +1494,7 @@
"batchNodeCollectionSizeMismatch": "Kích cỡ tài nguyên không phù hợp với Lô {{batchGroupId}}",
"emptyBatches": "lô trống",
"batchNodeNotConnected": "Node Hàng Loạt chưa được kết nối: {{label}}",
"batchNodeEmptyCollection": "Một vài node hàng loạt có tài nguyên rỗng",
"collectionEmpty": "tài nguyên trống",
"batchNodeCollectionSizeMismatchNoGroupId": "tài nguyên theo nhóm có kích thước sai lệch"
"batchNodeEmptyCollection": "Một vài node hàng loạt có tài nguyên rỗng"
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -2167,7 +2159,7 @@
"parameterSetDesc": "Gợi lại {{parameter}}",
"loadedWithWarnings": "Đã Tải Workflow Với Cảnh Báo",
"outOfMemoryErrorDesc": "Thiết lập tạo sinh hiện tại đã vượt mức cho phép của thiết bị. Hãy điều chỉnh thiết lập và thử lại.",
"setNodeField": "Đặt làm vùng node",
"setNodeField": "Đặt làm vùng cho node",
"problemRetrievingWorkflow": "Có Vấn Đề Khi Lấy Lại Workflow",
"somethingWentWrong": "Có Vấn Đề Phát Sinh",
"problemDeletingWorkflow": "Có Vấn Đề Khi Xoá Workflow",
@@ -2255,11 +2247,11 @@
"workflowLibrary": "Thư Viện",
"opened": "Ngày Mở",
"deleteWorkflow": "Xoá Workflow",
"workflowEditorMenu": "Menu Biên Tập Workflow",
"workflowEditorMenu": "Menu Biên Tập Viên Workflow",
"uploadAndSaveWorkflow": "Tải Lên Thư Viện",
"openLibrary": "Mở Thư Viện",
"builder": {
"resetAllNodeFields": "Tải Lại Các Vùng Node",
"resetAllNodeFields": "Khởi Động Lại Tất Cả Vùng Cho Node",
"builder": "Trình Tạo Vùng Nhập",
"layout": "Bố Cục",
"row": "Hàng",
@@ -2274,19 +2266,15 @@
"slider": "Thanh Trượt",
"both": "Cả Hai",
"emptyRootPlaceholderViewMode": "Chọn Chỉnh Sửa để bắt đầu tạo nên một vùng nhập cho workflow này.",
"emptyRootPlaceholderEditMode": "Kéo thành phần vùng nhập hoặc vùng node vào đây để bắt đầu.",
"emptyRootPlaceholderEditMode": "Kéo thành phần vùng nhập hoặc vùng cho node vào đây để bắt đầu.",
"containerPlaceholder": "Hộp Chứa Trống",
"headingPlaceholder": "Đầu Dòng Trống",
"textPlaceholder": "Văn Bản Trống",
"textPlaceholder": "Mô Tả Trống",
"column": "Cột",
"deleteAllElements": "Xóa Tất Cả Thành Phần",
"nodeField": "Vùng Node",
"nodeFieldTooltip": "Để thêm vùng node, bấm vào dấu cộng nhỏ trên vùng trong Trình Biên Tập Workflow, hoặc kéo vùng theo tên của nó vào vùng nhập.",
"workflowBuilderAlphaWarning": "Trình tạo vùng nhập đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành.",
"container": "Hộp Chứa",
"heading": "Đầu Dòng",
"text": "Văn Bản",
"divider": "Gạch Chia"
"deleteAllElements": "Xóa Tất Cả Thành Phần Vùng Nhập",
"nodeField": "Vùng Cho Node",
"nodeFieldTooltip": "Để thêm vùng cho node, bấm vào dấu cộng nhỏ trên vùng trong Vùng Biên Tập Workflow, hoặc kéo vùng theo tên của nó vào vùng nhập.",
"workflowBuilderAlphaWarning": "Trình tạo workflow đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành."
}
},
"upscaling": {
@@ -2322,8 +2310,12 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Trình Biên Tập Workflow: trình tạo vùng nhập dưới dạng kéo thả nhằm tạo dựng workflow dễ dàng hơn.",
"Các nâng cấp khác: Xếp hàng tạo sinh theo nhóm nhanh hơn, upscale tốt hơn, trình chọn màu được cải thiện, và node chứa metadata."
"Cải thiện các thiết lập mặc định của VRAM",
"Xoá bộ nhớ đệm của model theo yêu cầu",
"Mở rộng khả năng tương thích LoRA trên FLUX",
"Bộ lọc điều chỉnh ảnh trên Canvas",
"Huỷ tất cả trừ mục đang xếp hàng hiện tại",
"Sao chép và dán trên Canvas"
]
},
"upsell": {

@@ -3,7 +3,6 @@ import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { extractMessageFromAssertionError } from 'common/util/extractMessageFromAssertionError';
import { withResult, withResultAsync } from 'common/util/result';
import { parseify } from 'common/util/serialize';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
@@ -14,6 +13,7 @@ import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import { assert, AssertionError } from 'tsafe';
import type { JsonObject } from 'type-fest';

const log = logger('generation');

@@ -80,15 +80,16 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, enqueueMutationFixedCacheKeyOptions)
);
req.reset();

try {
await req.unwrap();
log.debug(parseify({ batchConfig: prepareBatchResult.value }), 'Enqueued batch');
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
} finally {
req.reset();
const enqueueResult = await withResultAsync(() => req.unwrap());

if (enqueueResult.isErr()) {
log.error({ error: serializeError(enqueueResult.error) }, 'Failed to enqueue batch');
return;
}

log.debug({ batchConfig: prepareBatchResult.value } as JsonObject, 'Enqueued batch');
},
});
};

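Editor's note: the hunk above replaces the try/catch/finally around req.unwrap() with a Result-style wrapper, and moves req.reset() up front so the RTK Query mutation state is released regardless of outcome (unwrap() still resolves). Below is a minimal sketch of what a withResultAsync helper can look like; the repo's actual implementation in common/util/result may differ (per the hunk it exposes isErr() and error rather than a plain ok flag).

type Ok<T> = { readonly ok: true; readonly value: T };
type Err<E> = { readonly ok: false; readonly error: E };
type Result<T, E = unknown> = Ok<T> | Err<E>;

// Run an async thunk and capture success or failure as a value instead of a thrown error.
const withResultAsync = async <T>(fn: () => Promise<T>): Promise<Result<T>> => {
  try {
    return { ok: true, value: await fn() };
  } catch (error) {
    return { ok: false, error };
  }
};

// Usage mirroring the hunk above (names taken from its context):
//   const enqueueResult = await withResultAsync(() => req.unwrap());
//   if (!enqueueResult.ok) {
//     log.error({ error: serializeError(enqueueResult.error) }, 'Failed to enqueue batch');
//     return;
//   }
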
@@ -1,7 +1,5 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { isBatchNode, isInvocationNode } from 'features/nodes/types/invocation';
@@ -9,12 +7,9 @@ import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { resolveBatchValue } from 'features/nodes/util/node/resolveBatchValue';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { groupBy } from 'lodash-es';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Batch, BatchConfig } from 'services/api/types';

const log = logger('generation');

export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
@@ -106,9 +101,6 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
try {
await req.unwrap();
log.debug(parseify({ batchConfig }), 'Enqueued batch');
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
} finally {
req.reset();
}

@@ -1,14 +1,9 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';

const log = logger('generation');

export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening) => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
@@ -24,9 +19,6 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
try {
await req.unwrap();
log.debug(parseify({ batchConfig }), 'Enqueued batch');
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
} finally {
req.reset();
}

@@ -1,17 +0,0 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import type { Opts as LinkifyOpts } from 'linkifyjs';

export const linkifySx: SystemStyleObject = {
  a: {
    fontWeight: 'semibold',
  },
  'a:hover': {
    textDecoration: 'underline',
  },
};

export const linkifyOptions: LinkifyOpts = {
  target: '_blank',
  rel: 'noopener noreferrer',
  validate: (value) => /^https?:\/\//.test(value),
};
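For reference, the deleted module above was consumed roughly as follows. This sketch is assembled from the usages that later hunks in this diff remove; the wrapper component name is hypothetical.

import { Text } from '@invoke-ai/ui-library';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import Linkify from 'linkify-react';

// Hypothetical wrapper: renders text with http(s) URLs turned into styled, new-tab links.
const LinkifiedText = ({ children }: { children: string }) => (
  <Text sx={linkifySx}>
    <Linkify options={linkifyOptions}>{children}</Linkify>
  </Text>
);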
@@ -128,11 +128,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
getInputProps: getUploadInputProps,
open: openUploader,
} = useDropzone({
accept: {
  'image/png': ['.png'],
  'image/jpeg': ['.jpg', '.jpeg', '.png'],
  'image/webp': ['.webp'],
},
accept: { 'image/png': ['.png'], 'image/jpeg': ['.jpg', '.jpeg', '.png'] },
onDropAccepted,
onDropRejected,
disabled: isDisabled,

@@ -2,7 +2,6 @@ import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
@@ -28,7 +27,6 @@ export const IPAdapterSettingsEmptyState = memo(() => {
const onClickGalleryButton = useCallback(() => {
  dispatch(activeTabCanvasRightPanelChanged('gallery'));
}, [dispatch]);
const pullBboxIntoIPAdapter = usePullBboxIntoGlobalReferenceImage(entityIdentifier);

const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
  () => setGlobalReferenceImageDndTarget.getData({ entityIdentifier }),
@@ -43,11 +41,8 @@ export const IPAdapterSettingsEmptyState = memo(() => {
GalleryButton: (
  <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
PullBboxButton: (
  <Button onClick={pullBboxIntoIPAdapter} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}),
[isBusy, onClickGalleryButton, pullBboxIntoIPAdapter, uploadApi]
[isBusy, onClickGalleryButton, uploadApi]
);

return (

@@ -2,7 +2,6 @@ import { Button, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoRegionalGuidanceReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { rgIPAdapterDeleted } from 'features/controlLayers/store/canvasSlice';
import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
@@ -37,7 +36,6 @@ export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImag
const onDeleteIPAdapter = useCallback(() => {
  dispatch(rgIPAdapterDeleted({ entityIdentifier, referenceImageId }));
}, [dispatch, entityIdentifier, referenceImageId]);
const pullBboxIntoIPAdapter = usePullBboxIntoRegionalGuidanceReferenceImage(entityIdentifier, referenceImageId);

const dndTargetData = useMemo<SetRegionalGuidanceReferenceImageDndTargetData>(
  () =>
@@ -48,21 +46,6 @@ export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImag
[entityIdentifier, referenceImageId]
);

const components = useMemo(
  () => ({
    UploadButton: (
      <Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
    ),
    GalleryButton: (
      <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
    ),
    PullBboxButton: (
      <Button onClick={pullBboxIntoIPAdapter} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
    ),
  }),
  [isBusy, onClickGalleryButton, pullBboxIntoIPAdapter, uploadApi]
);

return (
<Flex flexDir="column" gap={2} position="relative" w="full">
<Flex alignItems="center" gap={2}>
@@ -83,7 +66,23 @@ export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImag
</Flex>
<Flex alignItems="center" gap={2} p={4}>
<Text textAlign="center" color="base.300">
<Trans i18nKey="controlLayers.referenceImageEmptyState" components={components} />
<Trans
  i18nKey="controlLayers.referenceImageEmptyState"
  components={{
    UploadButton: (
      <Button
        isDisabled={isBusy}
        size="sm"
        variant="link"
        color="base.300"
        {...uploadApi.getUploadButtonProps()}
      />
    ),
    GalleryButton: (
      <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
    ),
  }}
/>
</Text>
</Flex>
<input {...uploadApi.getUploadInputProps()} />

@@ -4,7 +4,7 @@ import type { CanvasState } from 'features/controlLayers/store/types';
import { selectDeleteImageModalSlice } from 'features/deleteImageModal/store/slice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import type { NodesState } from 'features/nodes/store/types';
import { isImageFieldCollectionInputInstance, isImageFieldInputInstance } from 'features/nodes/types/field';
import { isImageFieldInputInstance } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
@@ -13,23 +13,11 @@ import { some } from 'lodash-es';
import type { ImageUsage } from './types';
// TODO(psyche): handle image deletion (canvas staging area?)
export const getImageUsage = (nodes: NodesState, canvas: CanvasState, upscale: UpscaleState, image_name: string) => {
const isNodesImage = nodes.nodes.filter(isInvocationNode).some((node) =>
  some(node.data.inputs, (input) => {
    if (isImageFieldInputInstance(input)) {
      if (input.value?.image_name === image_name) {
        return true;
      }
    }

    if (isImageFieldCollectionInputInstance(input)) {
      if (input.value?.some((value) => value?.image_name === image_name)) {
        return true;
      }
    }

    return false;
  })
);
const isNodesImage = nodes.nodes
  .filter(isInvocationNode)
  .some((node) =>
    some(node.data.inputs, (input) => isImageFieldInputInstance(input) && input.value?.image_name === image_name)
  );

const isUpscaleImage = upscale.upscaleInitialImage?.image_name === image_name;

@@ -22,8 +22,8 @@ import { useBoardName } from 'services/api/hooks/useBoardName';
import type { UploadImageArg } from 'services/api/types';
import { z } from 'zod';

const ACCEPTED_IMAGE_TYPES = ['image/png', 'image/jpg', 'image/jpeg', 'image/webp'];
const ACCEPTED_FILE_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.webp'];
const ACCEPTED_IMAGE_TYPES = ['image/png', 'image/jpg', 'image/jpeg'];
const ACCEPTED_FILE_EXTENSIONS = ['.png', '.jpg', '.jpeg'];

// const MAX_IMAGE_SIZE = 4; //In MegaBytes
// const sizeInMB = (sizeInBytes: number, decimalsNum = 2) => {

@@ -72,11 +72,7 @@ const ModelImageUpload = ({ model_key, model_image }: Props) => {
}, [model_key, t, deleteModelImage]);

const { getInputProps, getRootProps } = useDropzone({
accept: {
  'image/png': ['.png'],
  'image/jpeg': ['.jpg', '.jpeg', '.png'],
  'image/webp': ['.webp'],
},
accept: { 'image/png': ['.png'], 'image/jpeg': ['.jpg', '.jpeg', '.png'] },
onDropAccepted,
noDrag: true,
multiple: false,

@@ -1,6 +1,5 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { EMPTY_ARRAY } from 'app/store/constants';
import { NO_DRAG_CLASS, NO_WHEEL_CLASS } from 'features/nodes/types/constants';
import type { ImageGeneratorImagesFromBoard } from 'features/nodes/types/field';
import { memo, useCallback, useMemo } from 'react';
@@ -50,21 +49,31 @@ const BoardCombobox = ({
board_id,
onChange: _onChange,
}: {
board_id: string | undefined;
board_id: string;
onChange: (board_id: string) => void;
}) => {
const { t } = useTranslation();
const listAllBoardsQuery = useListAllBoardsQuery(listAllBoardsQueryArg);

const noneOption = useMemo<ComboboxOption>(() => {
  return {
    label: `${t('common.none')} (${t('boards.uncategorized')})`,
    value: 'none',
  };
}, [t]);

const options = useMemo<ComboboxOption[]>(() => {
  if (!listAllBoardsQuery.data) {
    return EMPTY_ARRAY;
  const _options: ComboboxOption[] = [noneOption];
  if (listAllBoardsQuery.data) {
    for (const board of listAllBoardsQuery.data) {
      _options.push({
        label: board.board_name,
        value: board.board_id,
      });
    }
  }
  return listAllBoardsQuery.data.map((board) => ({
    label: board.board_name,
    value: board.board_id,
  }));
}, [listAllBoardsQuery.data]);
  return _options;
}, [listAllBoardsQuery.data, noneOption]);

const onChange = useCallback<ComboboxOnChange>(
  (v) => {
@@ -78,7 +87,13 @@ const BoardCombobox = ({
[_onChange]
);

const value = useMemo(() => options.find((o) => o.value === board_id) ?? null, [board_id, options]);
const value = useMemo(() => {
  if (board_id === 'none') {
    return noneOption;
  }
  const boardOption = options.find((o) => o.value === board_id);
  return boardOption ?? noneOption;
}, [board_id, options, noneOption]);

const noOptionsMessage = useCallback(() => t('boards.noMatching'), [t]);
@@ -1,21 +1,16 @@
import { Flex, IconButton, Spacer } from '@invoke-ai/ui-library';
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import AddNodeButton from 'features/nodes/components/flow/panels/TopPanel/AddNodeButton';
import ClearFlowButton from 'features/nodes/components/flow/panels/TopPanel/ClearFlowButton';
import SaveWorkflowButton from 'features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton';
import UpdateNodesButton from 'features/nodes/components/flow/panels/TopPanel/UpdateNodesButton';
import { useWorkflowEditorSettingsModal } from 'features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings';
import { WorkflowName } from 'features/nodes/components/sidePanel/WorkflowName';
import { selectWorkflowName } from 'features/nodes/store/workflowSlice';
import WorkflowLibraryMenu from 'features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiGearSixFill } from 'react-icons/pi';

const TopCenterPanel = () => {
const name = useAppSelector(selectWorkflowName);
const modal = useWorkflowEditorSettingsModal();

const { t } = useTranslation();
return (
<Flex gap={2} top={2} left={2} right={2} position="absolute" alignItems="flex-start" pointerEvents="none">
<Flex gap="2">
@@ -27,12 +22,7 @@ const TopCenterPanel = () => {
<Spacer />
<ClearFlowButton />
<SaveWorkflowButton />
<IconButton
  pointerEvents="auto"
  aria-label={t('workflows.workflowEditorMenu')}
  icon={<PiGearSixFill />}
  onClick={modal.setTrue}
/>
<WorkflowLibraryMenu />
</Flex>
);
};

@@ -23,6 +23,7 @@ export const NewWorkflowButton = memo(() => {
<IconButton
  onClick={onClickNewWorkflow}
  variant="ghost"
  size="sm"
  aria-label={t('nodes.newWorkflow')}
  tooltip={t('nodes.newWorkflow')}
  icon={<PiFilePlusBold />}
@@ -1,8 +1,6 @@
import { Text } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import { selectWorkflowDescription } from 'features/nodes/store/workflowSlice';
import Linkify from 'linkify-react';
import { memo } from 'react';

export const ActiveWorkflowDescription = memo(() => {
@@ -13,8 +11,8 @@ export const ActiveWorkflowDescription = memo(() => {
}

return (
<Text color="base.300" fontStyle="italic" pb={2} sx={linkifySx}>
  <Linkify options={linkifyOptions}>{description}</Linkify>
<Text color="base.300" fontStyle="italic" noOfLines={1} pb={2}>
  {description}
</Text>
);
});

@@ -1,9 +1,9 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { NewWorkflowButton } from 'features/nodes/components/sidePanel/NewWorkflowButton';
import { WorkflowListMenuTrigger } from 'features/nodes/components/sidePanel/WorkflowListMenu/WorkflowListMenuTrigger';
import { WorkflowViewEditToggleButton } from 'features/nodes/components/sidePanel/WorkflowViewEditToggleButton';
import { selectWorkflowMode } from 'features/nodes/store/workflowSlice';
import { WorkflowLibraryMenu } from 'features/workflowLibrary/components/WorkflowLibraryMenu/WorkflowLibraryMenu';
import { memo } from 'react';

import SaveWorkflowButton from './SaveWorkflowButton';
@@ -17,7 +17,7 @@ export const ActiveWorkflowNameAndActions = memo(() => {
<Spacer />
{mode === 'edit' && <SaveWorkflowButton />}
<WorkflowViewEditToggleButton />
<WorkflowLibraryMenu />
<NewWorkflowButton />
</Flex>
);
});
@@ -6,7 +6,7 @@ import dateFormat, { masks } from 'dateformat';
import { selectWorkflowId } from 'features/nodes/store/workflowSlice';
import { useDeleteWorkflow } from 'features/workflowLibrary/components/DeleteLibraryWorkflowConfirmationAlertDialog';
import { useLoadWorkflow } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { useDownloadWorkflowById } from 'features/workflowLibrary/hooks/useDownloadWorkflowById';
import { useDownloadWorkflow } from 'features/workflowLibrary/hooks/useDownloadWorkflow';
import type { MouseEvent } from 'react';
import { useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
@@ -30,7 +30,7 @@ export const WorkflowListItem = ({ workflow }: { workflow: WorkflowRecordListIte
}, []);

const workflowId = useAppSelector(selectWorkflowId);
const { downloadWorkflow, isLoading: isLoadingDownloadWorkflow } = useDownloadWorkflowById();
const downloadWorkflow = useDownloadWorkflow();
const shareWorkflow = useShareWorkflow();
const deleteWorkflow = useDeleteWorkflow();
const loadWorkflow = useLoadWorkflow();
@@ -71,9 +71,9 @@ export const WorkflowListItem = ({ workflow }: { workflow: WorkflowRecordListIte
(e: MouseEvent<HTMLButtonElement>) => {
  e.stopPropagation();
  setIsHovered(false);
  downloadWorkflow(workflow.workflow_id);
  downloadWorkflow();
},
[downloadWorkflow, workflow.workflow_id]
[downloadWorkflow]
);

return (
@@ -144,7 +144,6 @@ export const WorkflowListItem = ({ workflow }: { workflow: WorkflowRecordListIte
aria-label={t('workflows.download')}
onClick={handleClickDownload}
icon={<PiDownloadSimpleBold />}
isLoading={isLoadingDownloadWorkflow}
/>
</Tooltip>
{!!projectUrl && workflow.workflow_id && workflow.category !== 'user' && (

@@ -15,7 +15,6 @@ import { useAppSelector } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { useWorkflowListMenu } from 'features/nodes/store/workflowListMenu';
import { selectWorkflowName } from 'features/nodes/store/workflowSlice';
import { NewWorkflowButton } from 'features/workflowLibrary/components/NewWorkflowButton';
import UploadWorkflowButton from 'features/workflowLibrary/components/UploadWorkflowButton';
import { useRef } from 'react';
import { useTranslation } from 'react-i18next';
@@ -64,7 +63,6 @@ export const WorkflowListMenuTrigger = () => {
<WorkflowSearch searchInputRef={searchInputRef} />
<WorkflowSortControl />
<UploadWorkflowButton />
<NewWorkflowButton />
</Flex>
<Box position="relative" w="full" h="full">
<ScrollableContent>

@@ -50,7 +50,7 @@ const ContainerElement = memo(({ id }: { id: string }) => {
ContainerElement.displayName = 'ContainerElementComponent';

const containerViewModeSx: SystemStyleObject = {
gap: 2,
gap: 4,
'&[data-self-layout="column"]': {
  flexDir: 'column',
  alignItems: 'stretch',
@@ -197,7 +197,7 @@ const rootViewModeSx: SystemStyleObject = {
borderRadius: 'base',
w: 'full',
h: 'full',
gap: 2,
gap: 4,
display: 'flex',
flex: 1,
maxW: '768px',
@@ -232,7 +232,6 @@ RootContainerElementViewMode.displayName = 'RootContainerElementViewMode';

const rootEditModeSx: SystemStyleObject = {
...rootViewModeSx,
gap: 4,
'&[data-is-dragging-over="true"]': {
  opacity: 1,
  bg: 'base.850',
@@ -9,7 +9,6 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { formElementContainerDataChanged } from 'features/nodes/store/workflowSlice';
@@ -37,24 +36,22 @@ export const ContainerElementSettings = memo(({ element }: { element: ContainerE
<PopoverTrigger>
  <IconButton aria-label="settings" icon={<PiWrenchFill />} variant="link" size="sm" alignSelf="stretch" />
</PopoverTrigger>
<Portal>
  <PopoverContent>
    <PopoverArrow />
    <PopoverBody>
      <FormControl>
        <FormLabel m={0}>{t('workflows.builder.layout')}</FormLabel>
        <ButtonGroup variant="outline" size="sm">
          <Button onClick={setLayoutToRow} colorScheme={layout === 'row' ? 'invokeBlue' : 'base'}>
            {t('workflows.builder.row')}
          </Button>
          <Button onClick={setLayoutToColumn} colorScheme={layout === 'column' ? 'invokeBlue' : 'base'}>
            {t('workflows.builder.column')}
          </Button>
        </ButtonGroup>
      </FormControl>
    </PopoverBody>
  </PopoverContent>
</Portal>
<PopoverContent>
  <PopoverArrow />
  <PopoverBody>
    <FormControl>
      <FormLabel m={0}>{t('workflows.builder.layout')}</FormLabel>
      <ButtonGroup variant="outline" size="sm">
        <Button onClick={setLayoutToRow} colorScheme={layout === 'row' ? 'invokeBlue' : 'base'}>
          {t('workflows.builder.row')}
        </Button>
        <Button onClick={setLayoutToColumn} colorScheme={layout === 'column' ? 'invokeBlue' : 'base'}>
          {t('workflows.builder.column')}
        </Button>
      </ButtonGroup>
    </FormControl>
  </PopoverBody>
</PopoverContent>
</Popover>
);
});

@@ -1,7 +1,5 @@
import type { HeadingProps, SystemStyleObject } from '@invoke-ai/ui-library';
import { Text } from '@invoke-ai/ui-library';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import Linkify from 'linkify-react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

@@ -11,14 +9,13 @@ const headingSx: SystemStyleObject = {
'&[data-is-empty="true"]': {
  opacity: 0.3,
},
...linkifySx,
};

export const HeadingElementContent = memo(({ content, ...rest }: { content: string } & HeadingProps) => {
const { t } = useTranslation();
return (
<Text sx={headingSx} data-is-empty={content === ''} {...rest}>
  <Linkify options={linkifyOptions}>{content || t('workflows.builder.headingPlaceholder')}</Linkify>
  {content || t('workflows.builder.headingPlaceholder')}
</Text>
);
});

@@ -1,12 +1,10 @@
import { FormHelperText, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import { useEditable } from 'common/hooks/useEditable';
import { useInputFieldDescription } from 'features/nodes/hooks/useInputFieldDescription';
import { useInputFieldTemplate } from 'features/nodes/hooks/useInputFieldTemplate';
import { fieldDescriptionChanged } from 'features/nodes/store/nodesSlice';
import type { NodeFieldElement } from 'features/nodes/types/workflow';
import Linkify from 'linkify-react';
import { memo, useCallback, useRef } from 'react';

export const NodeFieldElementDescriptionEditable = memo(({ el }: { el: NodeFieldElement }) => {
@@ -38,11 +36,7 @@ export const NodeFieldElementDescriptionEditable = memo(({ el }: { el: NodeField
});

if (!editable.isEditing) {
return (
  <FormHelperText onDoubleClick={editable.startEditing} sx={linkifySx}>
    <Linkify options={linkifyOptions}>{editable.value}</Linkify>
  </FormHelperText>
);
return <FormHelperText onDoubleClick={editable.startEditing}>{editable.value}</FormHelperText>;
}

return (
@@ -7,7 +7,6 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
Switch,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
@@ -66,20 +65,18 @@ export const NodeFieldElementSettings = memo(({ element }: { element: NodeFieldE
<PopoverTrigger>
  <IconButton aria-label="settings" icon={<PiWrenchFill />} variant="link" size="sm" alignSelf="stretch" />
</PopoverTrigger>
<Portal>
  <PopoverContent>
    <PopoverArrow />
    <PopoverBody minW={48}>
      <FormControl>
        <FormLabel flex={1}>{t('workflows.builder.showDescription')}</FormLabel>
        <Switch size="sm" isChecked={showDescription} onChange={toggleShowDescription} />
      </FormControl>
      {settings?.type === 'integer-field-config' && <NodeFieldElementIntegerConfig id={id} config={settings} />}
      {settings?.type === 'float-field-config' && <NodeFieldElementFloatSettings id={id} config={settings} />}
      {settings?.type === 'string-field-config' && <NodeFieldElementStringSettings id={id} config={settings} />}
    </PopoverBody>
  </PopoverContent>
</Portal>
<PopoverContent>
  <PopoverArrow />
  <PopoverBody minW={48}>
    <FormControl>
      <FormLabel flex={1}>{t('workflows.builder.showDescription')}</FormLabel>
      <Switch size="sm" isChecked={showDescription} onChange={toggleShowDescription} />
    </FormControl>
    {settings?.type === 'integer-field-config' && <NodeFieldElementIntegerConfig id={id} config={settings} />}
    {settings?.type === 'float-field-config' && <NodeFieldElementFloatSettings id={id} config={settings} />}
    {settings?.type === 'string-field-config' && <NodeFieldElementStringSettings id={id} config={settings} />}
  </PopoverBody>
</PopoverContent>
</Popover>
);
});

@@ -1,6 +1,5 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, FormControl, FormHelperText } from '@invoke-ai/ui-library';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import { InputFieldRenderer } from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer';
import { useContainerContext } from 'features/nodes/components/sidePanel/builder/contexts';
import { NodeFieldElementLabel } from 'features/nodes/components/sidePanel/builder/NodeFieldElementLabel';
@@ -8,7 +7,6 @@ import { useInputFieldDescription } from 'features/nodes/hooks/useInputFieldDesc
import { useInputFieldTemplate } from 'features/nodes/hooks/useInputFieldTemplate';
import type { NodeFieldElement } from 'features/nodes/types/workflow';
import { NODE_FIELD_CLASS_NAME } from 'features/nodes/types/workflow';
import Linkify from 'linkify-react';
import { memo, useMemo } from 'react';

const sx: SystemStyleObject = {
@@ -20,9 +18,6 @@ const sx: SystemStyleObject = {
  flex: '1 1 0',
  minW: 32,
},
'&[data-with-description="false"]': {
  pb: 2,
},
};

export const NodeFieldElementViewMode = memo(({ el }: { el: NodeFieldElement }) => {
@@ -38,13 +33,7 @@ export const NodeFieldElementViewMode = memo(({ el }: { el: NodeFieldElement })
);

return (
<Flex
  id={id}
  className={NODE_FIELD_CLASS_NAME}
  sx={sx}
  data-parent-layout={containerCtx.layout}
  data-with-description={showDescription && !!_description}
>
<Flex id={id} className={NODE_FIELD_CLASS_NAME} sx={sx} data-parent-layout={containerCtx.layout}>
<FormControl flex="1 1 0" orientation="vertical">
<NodeFieldElementLabel el={el} />
<Flex w="full" gap={4}>
@@ -54,11 +43,7 @@ export const NodeFieldElementViewMode = memo(({ el }: { el: NodeFieldElement })
  settings={data.settings}
/>
</Flex>
{showDescription && _description && (
  <FormHelperText sx={linkifySx}>
    <Linkify options={linkifyOptions}>{_description}</Linkify>
  </FormHelperText>
)}
{showDescription && _description && <FormHelperText>{_description}</FormHelperText>}
</FormControl>
</Flex>
);

@@ -1,7 +1,5 @@
import type { SystemStyleObject, TextProps } from '@invoke-ai/ui-library';
import { Text } from '@invoke-ai/ui-library';
import { linkifyOptions, linkifySx } from 'common/components/linkify';
import Linkify from 'linkify-react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

@@ -11,14 +9,13 @@ const textSx: SystemStyleObject = {
'&[data-is-empty="true"]': {
  opacity: 0.3,
},
...linkifySx,
};

export const TextElementContent = memo(({ content, ...rest }: { content: string } & TextProps) => {
const { t } = useTranslation();
return (
<Text sx={textSx} data-is-empty={content === ''} {...rest}>
  <Linkify options={linkifyOptions}>{content || t('workflows.builder.textPlaceholder')}</Linkify>
  {content || t('workflows.builder.textPlaceholder')}
</Text>
);
});
@@ -14,7 +14,8 @@ import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { selectIsFormEmpty } from 'features/nodes/store/workflowSlice';
import type { FormElement } from 'features/nodes/types/workflow';
import { buildContainer, buildDivider, buildHeading, buildText } from 'features/nodes/types/workflow';
import type { PropsWithChildren, RefObject } from 'react';
import { startCase } from 'lodash-es';
import type { RefObject } from 'react';
import { memo, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
@@ -38,10 +39,10 @@ export const WorkflowBuilder = memo(() => {
<Flex justifyContent="center" w="full" h="full">
<Flex flexDir="column" w="full" maxW="768px" gap={2}>
<Flex w="full" alignItems="center" gap={2} pt={3}>
<AddFormElementDndButton type="container">{t('workflows.builder.container')}</AddFormElementDndButton>
<AddFormElementDndButton type="divider">{t('workflows.builder.divider')}</AddFormElementDndButton>
<AddFormElementDndButton type="heading">{t('workflows.builder.heading')}</AddFormElementDndButton>
<AddFormElementDndButton type="text">{t('workflows.builder.text')}</AddFormElementDndButton>
<AddFormElementDndButton type="container" />
<AddFormElementDndButton type="divider" />
<AddFormElementDndButton type="heading" />
<AddFormElementDndButton type="text" />
<Button size="sm" variant="ghost" tooltip={t('workflows.builder.nodeFieldTooltip')}>
  {t('workflows.builder.nodeField')}
</Button>
@@ -129,17 +130,14 @@ const addFormElementButtonSx: SystemStyleObject = {
_disabled: { borderStyle: 'dashed', opacity: 0.5 },
};

const AddFormElementDndButton = ({
  type,
  children,
}: PropsWithChildren<{ type: Parameters<typeof useAddFormElementDnd>[0] }>) => {
const AddFormElementDndButton = ({ type }: { type: Parameters<typeof useAddFormElementDnd>[0] }) => {
const draggableRef = useRef<HTMLDivElement>(null);
const isDragging = useAddFormElementDnd(type, draggableRef);

return (
// Must be as div for draggable to work correctly
<Button as="div" ref={draggableRef} size="sm" isDisabled={isDragging} variant="outline" sx={addFormElementButtonSx}>
  {children}
  {startCase(type)}
</Button>
);
};
@@ -331,25 +331,14 @@ const buildInstanceTypeGuard = <T extends z.ZodTypeAny>(schema: T) => {
return (val: unknown): val is z.infer<T> => schema.safeParse(val).success;
};

/**
 * Builds a type guard for a specific field input template type.
 *
 * The output type guards are primarily used for determining which input component to render for fields in the
 * <InputFieldRenderer/> component.
 *
 * @param name The name of the field type.
 * @param cardinalities The allowed cardinalities for the field type. If omitted, all cardinalities are allowed.
 *
 * @returns A type guard for the specified field type.
 */
const buildTemplateTypeGuard =
<T extends FieldInputTemplate>(name: string, cardinalities?: FieldType['cardinality'][]) =>
<T extends FieldInputTemplate>(name: string, cardinality?: 'SINGLE' | 'COLLECTION' | 'SINGLE_OR_COLLECTION') =>
(template: FieldInputTemplate): template is T => {
  if (template.type.name !== name) {
    return false;
  }
  if (cardinalities) {
    return cardinalities.includes(template.type.cardinality);
  if (cardinality) {
    return template.type.cardinality === cardinality;
  }
  return true;
};
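To make the guard builder's behaviour concrete, here is a small, hypothetical call site. The guard and type names come from this file; the surrounding code is illustrative only.

// Illustrative only: shows the narrowing that buildTemplateTypeGuard provides.
declare const template: FieldInputTemplate;

if (isIntegerFieldInputTemplate(template)) {
  // In this branch, `template` is narrowed to IntegerFieldInputTemplate: its
  // type name is 'IntegerField' and, after this change, its cardinality is
  // exactly 'SINGLE' rather than 'SINGLE' or 'SINGLE_OR_COLLECTION'.
}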
@@ -377,10 +366,7 @@ export type IntegerFieldValue = z.infer<typeof zIntegerFieldValue>;
export type IntegerFieldInputInstance = z.infer<typeof zIntegerFieldInputInstance>;
export type IntegerFieldInputTemplate = z.infer<typeof zIntegerFieldInputTemplate>;
export const isIntegerFieldInputInstance = buildInstanceTypeGuard(zIntegerFieldInputInstance);
export const isIntegerFieldInputTemplate = buildTemplateTypeGuard<IntegerFieldInputTemplate>('IntegerField', [
  'SINGLE',
  'SINGLE_OR_COLLECTION',
]);
export const isIntegerFieldInputTemplate = buildTemplateTypeGuard<IntegerFieldInputTemplate>('IntegerField', 'SINGLE');
// #endregion

// #region IntegerField Collection
@@ -420,7 +406,7 @@ export type IntegerFieldCollectionInputTemplate = z.infer<typeof zIntegerFieldCo
export const isIntegerFieldCollectionInputInstance = buildInstanceTypeGuard(zIntegerFieldCollectionInputInstance);
export const isIntegerFieldCollectionInputTemplate = buildTemplateTypeGuard<IntegerFieldCollectionInputTemplate>(
  'IntegerField',
  ['COLLECTION']
  'COLLECTION'
);
// #endregion

@@ -446,10 +432,7 @@ export type FloatFieldValue = z.infer<typeof zFloatFieldValue>;
export type FloatFieldInputInstance = z.infer<typeof zFloatFieldInputInstance>;
export type FloatFieldInputTemplate = z.infer<typeof zFloatFieldInputTemplate>;
export const isFloatFieldInputInstance = buildInstanceTypeGuard(zFloatFieldInputInstance);
export const isFloatFieldInputTemplate = buildTemplateTypeGuard<FloatFieldInputTemplate>('FloatField', [
  'SINGLE',
  'SINGLE_OR_COLLECTION',
]);
export const isFloatFieldInputTemplate = buildTemplateTypeGuard<FloatFieldInputTemplate>('FloatField', 'SINGLE');
// #endregion

// #region FloatField Collection
@@ -488,7 +471,7 @@ export type FloatFieldCollectionInputTemplate = z.infer<typeof zFloatFieldCollec
export const isFloatFieldCollectionInputInstance = buildInstanceTypeGuard(zFloatFieldCollectionInputInstance);
export const isFloatFieldCollectionInputTemplate = buildTemplateTypeGuard<FloatFieldCollectionInputTemplate>(
  'FloatField',
  ['COLLECTION']
  'COLLECTION'
);
// #endregion

@@ -521,10 +504,7 @@ export type StringFieldValue = z.infer<typeof zStringFieldValue>;
export type StringFieldInputInstance = z.infer<typeof zStringFieldInputInstance>;
export type StringFieldInputTemplate = z.infer<typeof zStringFieldInputTemplate>;
export const isStringFieldInputInstance = buildInstanceTypeGuard(zStringFieldInputInstance);
export const isStringFieldInputTemplate = buildTemplateTypeGuard<StringFieldInputTemplate>('StringField', [
  'SINGLE',
  'SINGLE_OR_COLLECTION',
]);
export const isStringFieldInputTemplate = buildTemplateTypeGuard<StringFieldInputTemplate>('StringField', 'SINGLE');
// #endregion

// #region StringField Collection
@@ -570,7 +550,7 @@ export type StringFieldCollectionInputTemplate = z.infer<typeof zStringFieldColl
export const isStringFieldCollectionInputInstance = buildInstanceTypeGuard(zStringFieldCollectionInputInstance);
export const isStringFieldCollectionInputTemplate = buildTemplateTypeGuard<StringFieldCollectionInputTemplate>(
  'StringField',
  ['COLLECTION']
  'COLLECTION'
);
// #endregion

@@ -633,10 +613,7 @@ export type ImageFieldValue = z.infer<typeof zImageFieldValue>;
export type ImageFieldInputInstance = z.infer<typeof zImageFieldInputInstance>;
export type ImageFieldInputTemplate = z.infer<typeof zImageFieldInputTemplate>;
export const isImageFieldInputInstance = buildInstanceTypeGuard(zImageFieldInputInstance);
export const isImageFieldInputTemplate = buildTemplateTypeGuard<ImageFieldInputTemplate>('ImageField', [
  'SINGLE',
  'SINGLE_OR_COLLECTION',
]);
export const isImageFieldInputTemplate = buildTemplateTypeGuard<ImageFieldInputTemplate>('ImageField', 'SINGLE');
// #endregion

// #region ImageField Collection
@@ -671,7 +648,7 @@ export type ImageFieldCollectionInputTemplate = z.infer<typeof zImageFieldCollec
export const isImageFieldCollectionInputInstance = buildInstanceTypeGuard(zImageFieldCollectionInputInstance);
export const isImageFieldCollectionInputTemplate = buildTemplateTypeGuard<ImageFieldCollectionInputTemplate>(
  'ImageField',
  ['COLLECTION']
  'COLLECTION'
);
// #endregion

@@ -1557,7 +1534,7 @@ export const getStringGeneratorDefaults = (type: StringGeneratorFieldValue['type
export const ImageGeneratorImagesFromBoardType = 'image_generator_images_from_board';
const zImageGeneratorImagesFromBoard = z.object({
  type: z.literal(ImageGeneratorImagesFromBoardType).default(ImageGeneratorImagesFromBoardType),
  board_id: z.string().trim().min(1).optional(),
  board_id: z.string().trim().min(1).optional().default('none'),
  category: z.union([z.literal('images'), z.literal('assets')]).default('images'),
});
export type ImageGeneratorImagesFromBoard = z.infer<typeof zImageGeneratorImagesFromBoard>;
@@ -1567,13 +1544,10 @@ const getImageGeneratorImagesFromBoardValues = async (
dispatch: AppDispatch
) => {
const { board_id, category } = generator;
if (!board_id) {
  return EMPTY_ARRAY;
}
const req = dispatch(
  boardsApi.endpoints.listAllImageNamesForBoard.initiate(
    {
      board_id,
      board_id: board_id ?? 'none',
      categories: category === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES,
      is_intermediate: false,
    },
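The schema change above defaults a missing board_id to 'none' at parse time, which is why the consumer's early return could be dropped and the board_id ?? 'none' fallback suffices. A minimal sketch of the zod behaviour:

import { z } from 'zod';

// With .optional().default('none'), an absent board_id parses to 'none'
// instead of undefined, so downstream code no longer needs an early return.
const zBoardId = z.string().trim().min(1).optional().default('none');

zBoardId.parse(undefined); // => 'none'
zBoardId.parse('some-board-id'); // => 'some-board-id'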
@@ -1,12 +1,12 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useDownloadCurrentlyLoadedWorkflow } from 'features/workflowLibrary/hooks/useDownloadCurrentlyLoadedWorkflow';
import { useDownloadWorkflow } from 'features/workflowLibrary/hooks/useDownloadWorkflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadSimpleBold } from 'react-icons/pi';

const DownloadWorkflowMenuItem = () => {
const { t } = useTranslation();
const downloadWorkflow = useDownloadCurrentlyLoadedWorkflow();
const downloadWorkflow = useDownloadWorkflow();

return (
<MenuItem as="button" icon={<PiDownloadSimpleBold />} onClick={downloadWorkflow}>

@@ -0,0 +1,18 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useWorkflowEditorSettingsModal } from 'features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiGearSixFill } from 'react-icons/pi';

const DownloadWorkflowMenuItem = () => {
  const { t } = useTranslation();
  const modal = useWorkflowEditorSettingsModal();

  return (
    <MenuItem as="button" icon={<PiGearSixFill />} onClick={modal.setTrue}>
      {t('nodes.workflowSettings')}
    </MenuItem>
  );
};

export default memo(DownloadWorkflowMenuItem);
@@ -13,12 +13,13 @@ import LoadWorkflowFromGraphMenuItem from 'features/workflowLibrary/components/W
import { NewWorkflowMenuItem } from 'features/workflowLibrary/components/WorkflowLibraryMenu/NewWorkflowMenuItem';
import SaveWorkflowAsMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowAsMenuItem';
import SaveWorkflowMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem';
import SettingsMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/SettingsMenuItem';
import UploadWorkflowMenuItem from 'features/workflowLibrary/components/WorkflowLibraryMenu/UploadWorkflowMenuItem';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDotsThreeOutlineFill } from 'react-icons/pi';

export const WorkflowLibraryMenu = memo(() => {
const WorkflowLibraryMenu = () => {
const { t } = useTranslation();
const { isOpen, onOpen, onClose } = useDisclosure();
const shift = useShiftModifier();
@@ -30,8 +31,6 @@ export const WorkflowLibraryMenu = memo(() => {
aria-label={t('workflows.workflowEditorMenu')}
icon={<PiDotsThreeOutlineFill />}
pointerEvents="auto"
size="sm"
variant="ghost"
/>
<MenuList pointerEvents="auto">
<NewWorkflowMenuItem />
@@ -40,10 +39,13 @@ export const WorkflowLibraryMenu = memo(() => {
<SaveWorkflowMenuItem />
<SaveWorkflowAsMenuItem />
<DownloadWorkflowMenuItem />
<MenuDivider />
<SettingsMenuItem />
{shift && <MenuDivider />}
{shift && <LoadWorkflowFromGraphMenuItem />}
</MenuList>
</Menu>
);
});
WorkflowLibraryMenu.displayName = 'WorkflowLibraryMenu';
};

export default memo(WorkflowLibraryMenu);

@@ -3,7 +3,7 @@ import { $builtWorkflow } from 'features/nodes/hooks/useWorkflowWatcher';
import { workflowDownloaded } from 'features/workflowLibrary/store/actions';
import { useCallback } from 'react';

export const useDownloadCurrentlyLoadedWorkflow = () => {
export const useDownloadWorkflow = () => {
const dispatch = useAppDispatch();

const downloadWorkflow = useCallback(() => {
@@ -1,42 +0,0 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { toast } from 'features/toast/toast';
import { workflowDownloaded } from 'features/workflowLibrary/store/actions';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useLazyGetWorkflowQuery } from 'services/api/endpoints/workflows';

export const useDownloadWorkflowById = () => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const [trigger, query] = useLazyGetWorkflowQuery();

  const toastError = useCallback(() => {
    toast({ status: 'error', description: t('nodes.downloadWorkflowError') });
  }, [t]);

  const downloadWorkflow = useCallback(
    async (workflowId: string) => {
      try {
        const { data } = await trigger(workflowId);
        if (!data) {
          toastError();
          return;
        }
        const { workflow } = data;
        const blob = new Blob([JSON.stringify(workflow, null, 2)]);
        const a = document.createElement('a');
        a.href = URL.createObjectURL(blob);
        a.download = `${workflow.name || 'My Workflow'}.json`;
        document.body.appendChild(a);
        a.click();
        a.remove();
        dispatch(workflowDownloaded());
      } catch {
        toastError();
      }
    },
    [dispatch, toastError, trigger]
  );

  return { downloadWorkflow, isLoading: query.isLoading };
};
@@ -103,7 +103,6 @@ export const api = customCreateApi({
reducerPath: 'api',
tagTypes,
endpoints: () => ({}),
invalidationBehavior: 'immediately',
});

function getCircularReplacer() {
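The hunk above ends at the top of getCircularReplacer, whose body is not shown in this diff. For orientation, a typical circular-reference replacer for JSON.stringify looks like the following; this is an assumption, not necessarily this repo's implementation.

// Assumption: a conventional replacer that drops values already visited on the path.
function getCircularReplacer() {
  const seen = new WeakSet<object>();
  return (_key: string, value: unknown) => {
    if (typeof value === 'object' && value !== null) {
      if (seen.has(value)) {
        return undefined; // Omit circular references instead of throwing.
      }
      seen.add(value);
    }
    return value;
  };
}
// Usage: JSON.stringify(objWithPossibleCycles, getCircularReplacer());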
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
__version__ = "5.7.2"
__version__ = "5.7.0rc1"

@@ -101,7 +101,8 @@ dependencies = [
"xformers" = [
  # Core generation dependencies, pinned for reproducible builds.
  "xformers>=0.0.28.post1; sys_platform!='darwin'",
  # torch 2.4+cu carries its own triton dependency
  # Auxiliary dependencies, pinned only if necessary.
  "triton; sys_platform=='linux'",
]
"onnx" = ["onnxruntime"]
"onnx-cuda" = ["onnxruntime-gpu"]
@@ -1,127 +0,0 @@
import pytest
import torch

from tests.dangerously_run_function_in_subprocess import dangerously_run_function_in_subprocess

# These tests are a bit fiddly, because they depend on the import behaviour of torch. They use subprocesses to isolate
# that import behaviour, and then check that the function behaves as expected. We have to hack in some logging
# to verify that the tested function did what we expect inside the subprocess.


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA device.")
def test_configure_torch_cuda_allocator_configures_backend():
    """Test that configure_torch_cuda_allocator() configures the requested PyTorch CUDA memory allocator backend."""

    def test_func():
        import os

        # Unset the environment variable if it is set so that we can test setting it
        try:
            del os.environ["PYTORCH_CUDA_ALLOC_CONF"]
        except KeyError:
            pass

        from unittest.mock import MagicMock

        from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator

        mock_logger = MagicMock()

        # Set the PyTorch CUDA memory allocator to cudaMallocAsync
        configure_torch_cuda_allocator("backend:cudaMallocAsync", logger=mock_logger)

        # Verify that the PyTorch CUDA memory allocator was configured correctly
        import torch

        assert torch.cuda.get_allocator_backend() == "cudaMallocAsync"

        # Verify that the logger was called with the correct message
        mock_logger.info.assert_called_once()
        args, _kwargs = mock_logger.info.call_args
        logged_message = args[0]
        print(logged_message)

    stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)
    assert returncode == 0
    assert "PyTorch CUDA memory allocator: cudaMallocAsync" in stdout


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA device.")
def test_configure_torch_cuda_allocator_raises_if_torch_already_imported():
    """Test that configure_torch_cuda_allocator() raises a RuntimeError if torch was already imported."""

    def test_func():
        from unittest.mock import MagicMock

        # Import torch before calling configure_torch_cuda_allocator()
        import torch  # noqa: F401

        from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator

        try:
            configure_torch_cuda_allocator("backend:cudaMallocAsync", logger=MagicMock())
        except RuntimeError as e:
            print(e)

    stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)
    assert returncode == 0
    assert "configure_torch_cuda_allocator() must be called before importing torch." in stdout


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA device.")
def test_configure_torch_cuda_allocator_warns_if_env_var_is_set_differently():
    """Test that configure_torch_cuda_allocator() logs at WARNING level if PYTORCH_CUDA_ALLOC_CONF is set and doesn't
    match the requested configuration."""

    def test_func():
        import os

        # Explicitly set the environment variable
        os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:native"

        from unittest.mock import MagicMock

        from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator

        mock_logger = MagicMock()

        # Set the PyTorch CUDA memory allocator to a different configuration
        configure_torch_cuda_allocator("backend:cudaMallocAsync", logger=mock_logger)

        # Verify that the logger was called with the correct message
        mock_logger.warning.assert_called_once()
        args, _kwargs = mock_logger.warning.call_args
        logged_message = args[0]
        print(logged_message)

    stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)
    assert returncode == 0
    assert "Attempted to configure the PyTorch CUDA memory allocator with 'backend:cudaMallocAsync'" in stdout


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires CUDA device.")
def test_configure_torch_cuda_allocator_logs_if_env_var_is_already_set_correctly():
    """Test that configure_torch_cuda_allocator() logs at INFO level if PYTORCH_CUDA_ALLOC_CONF is set and matches the
    requested configuration."""

    def test_func():
        import os

        os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "backend:native"
        from unittest.mock import MagicMock

        from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator

        mock_logger = MagicMock()

        configure_torch_cuda_allocator("backend:native", logger=mock_logger)

        mock_logger.info.assert_called_once()
        args, _kwargs = mock_logger.info.call_args
        logged_message = args[0]
        print(logged_message)

    stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)
    assert returncode == 0
    assert "PYTORCH_CUDA_ALLOC_CONF is already set to 'backend:native'" in stdout
@@ -1,46 +0,0 @@
import inspect
import subprocess
import sys
import textwrap
from typing import Any, Callable


def dangerously_run_function_in_subprocess(func: Callable[[], Any]) -> tuple[str, str, int]:
    """**Use with caution! This should _only_ be used with trusted code!**

    Extracts a function's source and runs it in a separate subprocess. Returns stdout, stderr, and return code
    from the subprocess.

    This is useful for tests where an isolated environment is required.

    The function to be called must not have any arguments and must not have any closures over the scope in which it was
    defined.

    Any modules that the function depends on must be imported inside the function.
    """

    source_code = inspect.getsource(func)

    # Must dedent the source code to avoid indentation errors
    dedented_source_code = textwrap.dedent(source_code)

    # Get the function name so we can call it in the subprocess
    func_name = func.__name__

    # Create a script that calls the function
    script = f"""
import sys

{dedented_source_code}

if __name__ == "__main__":
    {func_name}()
"""

    result = subprocess.run(
        [sys.executable, "-c", textwrap.dedent(script)],  # Run the script in a subprocess
        capture_output=True,  # Capture stdout and stderr
        text=True,
    )

    return result.stdout, result.stderr, result.returncode
@@ -1,57 +0,0 @@
from tests.dangerously_run_function_in_subprocess import dangerously_run_function_in_subprocess


def test_simple_function():
    def test_func():
        print("Hello, Test!")

    stdout, stderr, returncode = dangerously_run_function_in_subprocess(test_func)

    assert returncode == 0
    assert stdout.strip() == "Hello, Test!"
    assert stderr == ""


def test_function_with_error():
    def test_func():
        raise ValueError("This is an error")

    _stdout, stderr, returncode = dangerously_run_function_in_subprocess(test_func)

    assert returncode != 0  # Should fail
    assert "ValueError: This is an error" in stderr


def test_function_with_imports():
    def test_func():
        import math

        print(math.sqrt(4))

    stdout, stderr, returncode = dangerously_run_function_in_subprocess(test_func)

    assert returncode == 0
    assert stdout.strip() == "2.0"
    assert stderr == ""


def test_function_with_sys_exit():
    def test_func():
        import sys

        sys.exit(42)

    _stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)

    assert returncode == 42  # Should return the custom exit code


def test_function_with_closure():
    foo = "bar"

    def test_func():
        print(foo)

    _stdout, _stderr, returncode = dangerously_run_function_in_subprocess(test_func)

    assert returncode == 1  # Should fail because of closure