Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-02-01 09:05:18 -05:00)

Compare commits: v6.3.0rc1...psychedeli (1 commit)

Commit c1a4376b75
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (vendored): 26 changes
@@ -21,20 +21,6 @@ body:
         - label: I have searched the existing issues
           required: true
 
-  - type: dropdown
-    id: install_method
-    attributes:
-      label: Install method
-      description: How did you install Invoke?
-      multiple: false
-      options:
-        - "Invoke's Launcher"
-        - 'Stability Matrix'
-        - 'Pinokio'
-        - 'Manual'
-    validations:
-      required: true
-
   - type: markdown
     attributes:
       value: __Describe your environment__
@@ -90,8 +76,8 @@ body:
     attributes:
       label: Version number
       description: |
-        The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
-      placeholder: ex. v6.0.2
+        The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: ex. 3.6.1
     validations:
       required: true
 
@@ -99,17 +85,17 @@ body:
     id: browser-version
     attributes:
       label: Browser
-      description: Your web browser and version, if you do not use the Launcher's provided GUI.
+      description: Your web browser and version.
       placeholder: ex. Firefox 123.0b3
     validations:
-      required: false
+      required: true
 
   - type: textarea
     id: python-deps
     attributes:
-      label: System Information
+      label: Python dependencies
       description: |
-        Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
+        If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
     validations:
       required: false
 
.github/workflows/typegen-checks.yml (vendored): 12 changes
@@ -39,18 +39,6 @@ jobs:
       - name: checkout
         uses: actions/checkout@v4
 
-      - name: Free up more disk space on the runner
-        # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
-        run: |
-          echo "----- Free space before cleanup"
-          df -h
-          sudo rm -rf /usr/share/dotnet
-          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
-          sudo swapoff /mnt/swapfile
-          sudo rm -rf /mnt/swapfile
-          echo "----- Free space after cleanup"
-          df -h
-
      - name: check for changed files
        if: ${{ inputs.always_run != true }}
        id: changed-files
.gitignore (vendored): 2 changes
@@ -190,5 +190,3 @@ installer/update.bat
 installer/update.sh
 installer/InvokeAI-Installer/
 .aider*
-
-.claude/
@@ -22,10 +22,6 @@
 ## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
 # GPU_DRIVER=cuda #| rocm
 
-## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
-## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
-# RENDER_GROUP_ID=
-
 ## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
 ## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
 # CONTAINER_UID=1000
@@ -5,7 +5,8 @@
 FROM docker.io/node:22-slim AS web-builder
 ENV PNPM_HOME="/pnpm"
 ENV PATH="$PNPM_HOME:$PATH"
-RUN corepack use pnpm@10.x && corepack enable
+RUN corepack use pnpm@8.x
+RUN corepack enable
 
 WORKDIR /build
 COPY invokeai/frontend/web/ ./
@@ -43,6 +44,7 @@ ENV \
     UV_MANAGED_PYTHON=1 \
     UV_LINK_MODE=copy \
     UV_PROJECT_ENVIRONMENT=/opt/venv \
+    UV_INDEX="https://download.pytorch.org/whl/cu124" \
     INVOKEAI_ROOT=/invokeai \
     INVOKEAI_HOST=0.0.0.0 \
     INVOKEAI_PORT=9090 \
@@ -73,18 +75,20 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=uv.lock,target=uv.lock \
     # this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
     --mount=type=bind,source=invokeai/version,target=invokeai/version \
-    ulimit -n 30000 && \
-    uv sync --extra $GPU_DRIVER --frozen
-# Link amdgpu.ids for ROCm builds
-# contributed by https://github.com/Rubonnek
-RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
-    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" && groupadd render
+    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
+    elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
+    fi && \
+    uv sync --frozen
 
 # build patchmatch
 RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
 RUN python -c "from patchmatch import patch_match"
 
+# Link amdgpu.ids for ROCm builds
+# contributed by https://github.com/Rubonnek
+RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
+    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
+
 RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
 
 COPY docker/docker-entrypoint.sh ./
@@ -102,6 +106,8 @@ COPY invokeai ${INVOKEAI_SRC}/invokeai
 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
     --mount=type=bind,source=uv.lock,target=uv.lock \
-    ulimit -n 30000 && \
-    uv pip install -e .[$GPU_DRIVER]
+    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
+    elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
+    fi && \
+    uv pip install -e .
 
@@ -1,136 +0,0 @@
-# syntax=docker/dockerfile:1.4
-
-#### Web UI ------------------------------------
-
-FROM docker.io/node:22-slim AS web-builder
-ENV PNPM_HOME="/pnpm"
-ENV PATH="$PNPM_HOME:$PATH"
-RUN corepack use pnpm@8.x
-RUN corepack enable
-
-WORKDIR /build
-COPY invokeai/frontend/web/ ./
-RUN --mount=type=cache,target=/pnpm/store \
-    pnpm install --frozen-lockfile
-RUN npx vite build
-
-## Backend ---------------------------------------
-
-FROM library/ubuntu:24.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-RUN --mount=type=cache,target=/var/cache/apt \
-    --mount=type=cache,target=/var/lib/apt \
-    apt update && apt install -y --no-install-recommends \
-    ca-certificates \
-    git \
-    gosu \
-    libglib2.0-0 \
-    libgl1 \
-    libglx-mesa0 \
-    build-essential \
-    libopencv-dev \
-    libstdc++-10-dev \
-    wget
-
-ENV \
-    PYTHONUNBUFFERED=1 \
-    PYTHONDONTWRITEBYTECODE=1 \
-    VIRTUAL_ENV=/opt/venv \
-    INVOKEAI_SRC=/opt/invokeai \
-    PYTHON_VERSION=3.12 \
-    UV_PYTHON=3.12 \
-    UV_COMPILE_BYTECODE=1 \
-    UV_MANAGED_PYTHON=1 \
-    UV_LINK_MODE=copy \
-    UV_PROJECT_ENVIRONMENT=/opt/venv \
-    INVOKEAI_ROOT=/invokeai \
-    INVOKEAI_HOST=0.0.0.0 \
-    INVOKEAI_PORT=9090 \
-    PATH="/opt/venv/bin:$PATH" \
-    CONTAINER_UID=${CONTAINER_UID:-1000} \
-    CONTAINER_GID=${CONTAINER_GID:-1000}
-
-ARG GPU_DRIVER=cuda
-
-# Install `uv` for package management
-COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
-
-# Install python & allow non-root user to use it by traversing the /root dir without read permissions
-RUN --mount=type=cache,target=/root/.cache/uv \
-    uv python install ${PYTHON_VERSION} && \
-    # chmod --recursive a+rX /root/.local/share/uv/python
-    chmod 711 /root
-
-WORKDIR ${INVOKEAI_SRC}
-
-# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
-# bind-mount instead of copy to defer adding sources to the image until next layer.
-#
-# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
-# x86_64/CUDA is the default
-RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
-    --mount=type=bind,source=uv.lock,target=uv.lock \
-    # this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
-    --mount=type=bind,source=invokeai/version,target=invokeai/version \
-    ulimit -n 30000 && \
-    uv sync --extra $GPU_DRIVER --frozen
-
-RUN --mount=type=cache,target=/var/cache/apt \
-    --mount=type=cache,target=/var/lib/apt \
-    if [ "$GPU_DRIVER" = "rocm" ]; then \
-    wget -O /tmp/amdgpu-install.deb \
-    https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
-    apt install -y /tmp/amdgpu-install.deb && \
-    apt update && \
-    amdgpu-install --usecase=rocm -y && \
-    apt-get autoclean && \
-    apt clean && \
-    rm -rf /tmp/* /var/tmp/* && \
-    usermod -a -G render ubuntu && \
-    usermod -a -G video ubuntu && \
-    echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
-    ldconfig && \
-    update-alternatives --auto rocm; \
-    fi
-
-## Heathen711: Leaving this for review input, will remove before merge
-# RUN --mount=type=cache,target=/var/cache/apt \
-#     --mount=type=cache,target=/var/lib/apt \
-#     if [ "$GPU_DRIVER" = "rocm" ]; then \
-#     groupadd render && \
-#     usermod -a -G render ubuntu && \
-#     usermod -a -G video ubuntu; \
-#     fi
-
-## Link amdgpu.ids for ROCm builds
-## contributed by https://github.com/Rubonnek
-# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
-#     ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
-
-# build patchmatch
-RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
-RUN python -c "from patchmatch import patch_match"
-
-RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
-
-COPY docker/docker-entrypoint.sh ./
-ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
-CMD ["invokeai-web"]
-
-# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
-COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
-
-# add sources last to minimize image changes on code changes
-COPY invokeai ${INVOKEAI_SRC}/invokeai
-
-# this should not increase image size because we've already installed dependencies
-# in a previous layer
-RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
-    --mount=type=bind,source=uv.lock,target=uv.lock \
-    ulimit -n 30000 && \
-    uv pip install -e .[$GPU_DRIVER]
-
@@ -47,9 +47,8 @@ services:
 
   invokeai-rocm:
     <<: *invokeai
-    environment:
-      - AMD_VISIBLE_DEVICES=all
-      - RENDER_GROUP_ID=${RENDER_GROUP_ID}
-    runtime: amd
+    devices:
+      - /dev/kfd:/dev/kfd
+      - /dev/dri:/dev/dri
     profiles:
       - rocm
@@ -21,17 +21,6 @@ _=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
 # ensure the UID is correct
 usermod -u ${USER_ID} ${USER} 1>/dev/null
 
-## ROCM specific configuration
-# render group within the container must match the host render group
-# otherwise the container will not be able to access the host GPU.
-if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
-    # ensure the render group exists
-    groupmod -g ${RENDER_GROUP_ID} render
-    usermod -a -G render ${USER}
-    usermod -a -G video ${USER}
-fi
-
-
 ### Set the $PUBLIC_KEY env var to enable SSH access.
 # We do not install openssh-server in the image by default to avoid bloat.
 # but it is useful to have the full SSH server e.g. on Runpod.
@@ -13,7 +13,7 @@ run() {
 
     # parse .env file for build args
     build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
-    profile="$(awk -F '=' '/GPU_DRIVER=/ {print $2}' .env)"
+    profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
 
     # default to 'cuda' profile
     [[ -z "$profile" ]] && profile="cuda"
@@ -30,7 +30,7 @@ run() {
 
     printf "%s\n" "starting service $service_name"
     docker compose --profile "$profile" up -d "$service_name"
-    docker compose --profile "$profile" logs -f
+    docker compose logs -f
 }
 
 run
@@ -41,7 +41,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
    With the modifications made, the install command should look something like this:
 
    ```sh
-   uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
+   uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu126 --reinstall
    ```
 
6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
@@ -50,11 +50,11 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
 
    If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.
 
-7. Install the frontend dev toolchain, paying attention to versions:
+7. Install the frontend dev toolchain:
 
-   - [`nodejs`](https://nodejs.org/) (tested on LTS, v22)
+   - [`nodejs`](https://nodejs.org/) (v20+)
 
-   - [`pnpm`](https://pnpm.io/installation) (tested on v10)
+   - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
 
8. Do a production build of the frontend:
 
@@ -297,7 +297,7 @@ Migration logic is in [migrations.ts].
 <!-- links -->
 
 [pydantic]: https://github.com/pydantic/pydantic 'pydantic'
-[zod]: https://github.com/colinhacks/zod 'zod'
+[zod]: https://github.com/colinhacks/zod 'zod/v4'
 [openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types'
 [reactflow]: https://github.com/xyflow/xyflow 'reactflow'
 [reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions
@@ -69,34 +69,34 @@ The following commands vary depending on the version of Invoke being installed a
     - If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
     - If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
 
-7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is acheived by using [UV's built in torch support.](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection)
+7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
 
    === "Invoke v5.12 and later"
 
-       - If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
-       - If you are on Linux with no GPU, use `--torch-backend=cpu`.
-       - If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
-       - **In all other cases, do not use a torch backend.**
+       - If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
+       - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+       - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
+       - **In all other cases, do not use an index.**
 
   === "Invoke v5.10.0 to v5.11.0"
 
-       - If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
-       - If you are on Linux with no GPU, use `--torch-backend=cpu`.
-       - If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
+       - If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
+       - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+       - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
        - **In all other cases, do not use an index.**
 
  === "Invoke v5.0.0 to v5.9.1"
 
-       - If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
-       - If you are on Linux with no GPU, use `--torch-backend=cpu`.
-       - If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
+       - If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
+       - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+       - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
        - **In all other cases, do not use an index.**
 
  === "Invoke v4"
 
-       - If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
-       - If you are on Linux with no GPU, use `--torch-backend=cpu`.
-       - If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
+       - If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
+       - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+       - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
        - **In all other cases, do not use an index.**
 
8. Install the `invokeai` package. Substitute the package specifier and version.
@@ -105,10 +105,10 @@ The following commands vary depending on the version of Invoke being installed a
    uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
    ```
 
-   If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
+   If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
 
    ```sh
-   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<VERSION> --force-reinstall
+   uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
    ```
 
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
@@ -10,7 +10,6 @@ from invokeai.app.services.board_images.board_images_default import BoardImagesS
 from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
 from invokeai.app.services.boards.boards_default import BoardService
 from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
-from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import ClientStatePersistenceSqlite
 from invokeai.app.services.config.config_default import InvokeAIAppConfig
 from invokeai.app.services.download.download_default import DownloadQueueService
 from invokeai.app.services.events.events_fastapievents import FastAPIEventService
@@ -152,7 +151,6 @@ class ApiDependencies:
         style_preset_records = SqliteStylePresetRecordsStorage(db=db)
         style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
         workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)
-        client_state_persistence = ClientStatePersistenceSqlite(db=db)
 
         services = InvocationServices(
             board_image_records=board_image_records,
@@ -183,7 +181,6 @@ class ApiDependencies:
             style_preset_records=style_preset_records,
             style_preset_image_files=style_preset_image_files,
             workflow_thumbnails=workflow_thumbnails,
-            client_state_persistence=client_state_persistence,
         )
 
         ApiDependencies.invoker = Invoker(services)
@@ -1,58 +0,0 @@
-from fastapi import Body, HTTPException, Path, Query
-from fastapi.routing import APIRouter
-
-from invokeai.app.api.dependencies import ApiDependencies
-from invokeai.backend.util.logging import logging
-
-client_state_router = APIRouter(prefix="/v1/client_state", tags=["client_state"])
-
-
-@client_state_router.get(
-    "/{queue_id}/get_by_key",
-    operation_id="get_client_state_by_key",
-    response_model=str | None,
-)
-async def get_client_state_by_key(
-    queue_id: str = Path(description="The queue id to perform this operation on"),
-    key: str = Query(..., description="Key to get"),
-) -> str | None:
-    """Gets the client state"""
-    try:
-        return ApiDependencies.invoker.services.client_state_persistence.get_by_key(queue_id, key)
-    except Exception as e:
-        logging.error(f"Error getting client state: {e}")
-        raise HTTPException(status_code=500, detail="Error setting client state")
-
-
-@client_state_router.post(
-    "/{queue_id}/set_by_key",
-    operation_id="set_client_state",
-    response_model=str,
-)
-async def set_client_state(
-    queue_id: str = Path(description="The queue id to perform this operation on"),
-    key: str = Query(..., description="Key to set"),
-    value: str = Body(..., description="Stringified value to set"),
-) -> str:
-    """Sets the client state"""
-    try:
-        return ApiDependencies.invoker.services.client_state_persistence.set_by_key(queue_id, key, value)
-    except Exception as e:
-        logging.error(f"Error setting client state: {e}")
-        raise HTTPException(status_code=500, detail="Error setting client state")
-
-
-@client_state_router.post(
-    "/{queue_id}/delete",
-    operation_id="delete_client_state",
-    responses={204: {"description": "Client state deleted"}},
-)
-async def delete_client_state(
-    queue_id: str = Path(description="The queue id to perform this operation on"),
-) -> None:
-    """Deletes the client state"""
-    try:
-        ApiDependencies.invoker.services.client_state_persistence.delete(queue_id)
-    except Exception as e:
-        logging.error(f"Error deleting client state: {e}")
-        raise HTTPException(status_code=500, detail="Error deleting client state")
@@ -19,7 +19,6 @@ from invokeai.app.api.routers import (
     app_info,
     board_images,
     boards,
-    client_state,
     download_queue,
     images,
     model_manager,
@@ -132,7 +131,6 @@ app.include_router(app_info.app_router, prefix="/api")
 app.include_router(session_queue.session_queue_router, prefix="/api")
 app.include_router(workflows.workflows_router, prefix="/api")
 app.include_router(style_presets.style_presets_router, prefix="/api")
-app.include_router(client_state.client_state_router, prefix="/api")
 
 app.openapi = get_openapi_func(app)
 
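The removed client_state module above follows the usual FastAPI router pattern: a module-level APIRouter created with a prefix and tags, handlers attached via decorators, and the router mounted on the app under /api. For readers unfamiliar with that pattern, here is a minimal, self-contained sketch; the names (example_router, /v1/example, get_value) are illustrative stand-ins, not InvokeAI identifiers.

```python
# Minimal sketch of the APIRouter registration pattern seen in the removed code.
# All names here are hypothetical; only the structure mirrors the diff above.
from fastapi import FastAPI, Query
from fastapi.routing import APIRouter

example_router = APIRouter(prefix="/v1/example", tags=["example"])


@example_router.get("/get_by_key", operation_id="example_get_by_key", response_model=str | None)
async def get_value(key: str = Query(..., description="Key to look up")) -> str | None:
    # A real handler would delegate to a service layer, as the removed
    # client_state router delegates to ApiDependencies.invoker.services.
    return None


app = FastAPI()
app.include_router(example_router, prefix="/api")  # served at /api/v1/example/get_by_key
```

Mounting the router with an extra prefix, as api_app.py does, composes the two prefixes, which is why the removed endpoints lived under /api/v1/client_state/....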
@@ -63,7 +63,7 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="4.1.0",
+    version="4.0.0",
 )
 class FluxDenoiseInvocation(BaseInvocation):
     """Run denoising process with a FLUX transformer model."""
@@ -153,7 +153,7 @@ class FluxDenoiseInvocation(BaseInvocation):
         description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection
     )
 
-    kontext_conditioning: FluxKontextConditioningField | list[FluxKontextConditioningField] | None = InputField(
+    kontext_conditioning: Optional[FluxKontextConditioningField] = InputField(
         default=None,
         description="FLUX Kontext conditioning (reference image).",
         input=Input.Connection,
@@ -386,15 +386,13 @@ class FluxDenoiseInvocation(BaseInvocation):
         )
 
         kontext_extension = None
-        if self.kontext_conditioning:
+        if self.kontext_conditioning is not None:
             if not self.controlnet_vae:
                 raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
 
             kontext_extension = KontextExtension(
                 context=context,
-                kontext_conditioning=self.kontext_conditioning
-                if isinstance(self.kontext_conditioning, list)
-                else [self.kontext_conditioning],
+                kontext_conditioning=self.kontext_conditioning,
                 vae_field=self.controlnet_vae,
                 device=TorchDevice.choose_torch_device(),
                 dtype=inference_dtype,
@@ -430,15 +430,6 @@ class FluxConditioningOutput(BaseInvocationOutput):
         return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
 
 
-@invocation_output("flux_conditioning_collection_output")
-class FluxConditioningCollectionOutput(BaseInvocationOutput):
-    """Base class for nodes that output a collection of conditioning tensors"""
-
-    collection: list[FluxConditioningField] = OutputField(
-        description="The output conditioning tensors",
-    )
-
-
 @invocation_output("sd3_conditioning_output")
 class SD3ConditioningOutput(BaseInvocationOutput):
     """Base class for nodes that output a single SD3 conditioning tensor"""
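On the base side of this hunk, kontext_conditioning may arrive either as a single FluxKontextConditioningField or as a list, and the invocation normalizes it to a list before constructing KontextExtension. Here is that normalization idiom in isolation, as a small hedged sketch with generic types standing in for the InvokeAI field classes:

```python
# Sketch of the single-or-list normalization used on the base side of this hunk.
# The types are stand-ins; only the isinstance idiom is the point.
from __future__ import annotations

from typing import TypeVar

T = TypeVar("T")


def as_list(value: T | list[T] | None) -> list[T]:
    """Return [] for None, wrap a single item, pass lists through unchanged."""
    if value is None:
        return []
    return value if isinstance(value, list) else [value]


assert as_list(None) == []
assert as_list("cond_a") == ["cond_a"]
assert as_list(["cond_a", "cond_b"]) == ["cond_a", "cond_b"]
```

The head side of the diff avoids the question entirely by typing the field as Optional[FluxKontextConditioningField], i.e. a single value or None.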
@@ -14,14 +14,15 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._db = db
+        self._conn = db.conn
 
     def add_image_to_board(
         self,
         board_id: str,
         image_name: str,
     ) -> None:
-        with self._db.transaction() as cursor:
+        try:
+            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 INSERT INTO board_images (board_id, image_name)
@@ -30,12 +31,17 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                 """,
                 (board_id, image_name, board_id),
             )
+            self._conn.commit()
+        except sqlite3.Error as e:
+            self._conn.rollback()
+            raise e
 
     def remove_image_from_board(
         self,
         image_name: str,
     ) -> None:
-        with self._db.transaction() as cursor:
+        try:
+            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 DELETE FROM board_images
@@ -43,6 +49,10 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                 """,
                 (image_name,),
             )
+            self._conn.commit()
+        except sqlite3.Error as e:
+            self._conn.rollback()
+            raise e
 
     def get_images_for_board(
         self,
@@ -50,26 +60,27 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         offset: int = 0,
         limit: int = 10,
     ) -> OffsetPaginatedResults[ImageRecord]:
-        with self._db.transaction() as cursor:
-            cursor.execute(
-                """--sql
-                SELECT images.*
-                FROM board_images
-                INNER JOIN images ON board_images.image_name = images.image_name
-                WHERE board_images.board_id = ?
-                ORDER BY board_images.updated_at DESC;
-                """,
-                (board_id,),
-            )
-            result = cast(list[sqlite3.Row], cursor.fetchall())
-            images = [deserialize_image_record(dict(r)) for r in result]
-
-            cursor.execute(
-                """--sql
-                SELECT COUNT(*) FROM images WHERE 1=1;
-                """
-            )
-            count = cast(int, cursor.fetchone()[0])
+        # TODO: this isn't paginated yet?
+        cursor = self._conn.cursor()
+        cursor.execute(
+            """--sql
+            SELECT images.*
+            FROM board_images
+            INNER JOIN images ON board_images.image_name = images.image_name
+            WHERE board_images.board_id = ?
+            ORDER BY board_images.updated_at DESC;
+            """,
+            (board_id,),
+        )
+        result = cast(list[sqlite3.Row], cursor.fetchall())
+        images = [deserialize_image_record(dict(r)) for r in result]
+
+        cursor.execute(
+            """--sql
+            SELECT COUNT(*) FROM images WHERE 1=1;
+            """
+        )
+        count = cast(int, cursor.fetchone()[0])
 
         return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
 
@@ -79,55 +90,56 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         categories: list[ImageCategory] | None,
         is_intermediate: bool | None,
     ) -> list[str]:
-        with self._db.transaction() as cursor:
-            params: list[str | bool] = []
+        params: list[str | bool] = []
 
         # Base query is a join between images and board_images
         stmt = """
            SELECT images.image_name
            FROM images
            LEFT JOIN board_images ON board_images.image_name = images.image_name
            WHERE 1=1
        """
 
         # Handle board_id filter
         if board_id == "none":
             stmt += """--sql
                AND board_images.board_id IS NULL
            """
         else:
             stmt += """--sql
                AND board_images.board_id = ?
            """
             params.append(board_id)
 
         # Add the category filter
         if categories is not None:
             # Convert the enum values to unique list of strings
             category_strings = [c.value for c in set(categories)]
             # Create the correct length of placeholders
             placeholders = ",".join("?" * len(category_strings))
             stmt += f"""--sql
                AND images.image_category IN ( {placeholders} )
            """
 
             # Unpack the included categories into the query params
             for c in category_strings:
                 params.append(c)
 
         # Add the is_intermediate filter
         if is_intermediate is not None:
             stmt += """--sql
                AND images.is_intermediate = ?
            """
             params.append(is_intermediate)
 
         # Put a ring on it
         stmt += ";"
 
-        cursor.execute(stmt, params)
+        # Execute the query
+        cursor = self._conn.cursor()
+        cursor.execute(stmt, params)
 
         result = cast(list[sqlite3.Row], cursor.fetchall())
         image_names = [r[0] for r in result]
         return image_names
 
@@ -135,31 +147,31 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         self,
         image_name: str,
     ) -> Optional[str]:
-        with self._db.transaction() as cursor:
+        cursor = self._conn.cursor()
         cursor.execute(
             """--sql
             SELECT board_id
             FROM board_images
             WHERE image_name = ?;
             """,
             (image_name,),
         )
         result = cursor.fetchone()
         if result is None:
             return None
         return cast(str, result[0])
 
     def get_image_count_for_board(self, board_id: str) -> int:
-        with self._db.transaction() as cursor:
+        cursor = self._conn.cursor()
         cursor.execute(
             """--sql
             SELECT COUNT(*)
             FROM board_images
             INNER JOIN images ON board_images.image_name = images.image_name
             WHERE images.is_intermediate = FALSE
             AND board_images.board_id = ?;
             """,
             (board_id,),
         )
         count = cast(int, cursor.fetchone()[0])
         return count
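The recurring change in these storage classes is the transaction style: the base side wraps each operation in `with self._db.transaction() as cursor:`, while the head side holds a raw connection and calls commit() and rollback() explicitly inside try/except. The capture does not include SqliteDatabase itself, so the following is only a plausible sketch of what such a transaction() helper typically looks like (commit on success, rollback and re-raise on error); it is an assumption for illustration, not InvokeAI's actual implementation.

```python
# Hedged sketch: one plausible shape for a transaction() helper like the one
# used as `with self._db.transaction() as cursor:` above. Not the real class.
import sqlite3
from contextlib import contextmanager
from typing import Iterator


class Database:
    def __init__(self, path: str = ":memory:") -> None:
        self.conn = sqlite3.connect(path)

    @contextmanager
    def transaction(self) -> Iterator[sqlite3.Cursor]:
        cursor = self.conn.cursor()
        try:
            yield cursor          # caller runs its statements here
            self.conn.commit()    # commit once if no exception escaped
        except Exception:
            self.conn.rollback()  # undo partial writes, then re-raise
            raise
        finally:
            cursor.close()


db = Database()
with db.transaction() as cur:
    cur.execute("CREATE TABLE IF NOT EXISTS boards (board_id TEXT PRIMARY KEY, board_name TEXT)")
    cur.execute("INSERT OR IGNORE INTO boards (board_id, board_name) VALUES (?, ?)", ("b1", "My board"))
```

With a helper like this, the per-method try/commit/rollback boilerplate on the head side collapses into a single with block, which is the net effect of the base-side code in these hunks.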
@@ -20,57 +20,61 @@ from invokeai.app.util.misc import uuid_string
 class SqliteBoardRecordStorage(BoardRecordStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._db = db
+        self._conn = db.conn
 
     def delete(self, board_id: str) -> None:
-        with self._db.transaction() as cursor:
-            try:
+        try:
+            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 DELETE FROM boards
                 WHERE board_id = ?;
                 """,
                 (board_id,),
             )
-        except Exception as e:
-            raise BoardRecordDeleteException from e
+            self._conn.commit()
+        except Exception as e:
+            self._conn.rollback()
+            raise BoardRecordDeleteException from e
 
     def save(
         self,
         board_name: str,
     ) -> BoardRecord:
-        with self._db.transaction() as cursor:
-            try:
-                board_id = uuid_string()
+        try:
+            board_id = uuid_string()
+            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 INSERT OR IGNORE INTO boards (board_id, board_name)
                 VALUES (?, ?);
                 """,
                 (board_id, board_name),
             )
-        except sqlite3.Error as e:
-            raise BoardRecordSaveException from e
+            self._conn.commit()
+        except sqlite3.Error as e:
+            self._conn.rollback()
+            raise BoardRecordSaveException from e
         return self.get(board_id)
 
     def get(
         self,
         board_id: str,
     ) -> BoardRecord:
-        with self._db.transaction() as cursor:
-            try:
+        try:
+            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 SELECT *
                 FROM boards
                 WHERE board_id = ?;
                 """,
                 (board_id,),
             )
 
             result = cast(Union[sqlite3.Row, None], cursor.fetchone())
         except sqlite3.Error as e:
             raise BoardRecordNotFoundException from e
         if result is None:
             raise BoardRecordNotFoundException
         return BoardRecord(**dict(result))
@@ -80,43 +84,45 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         board_id: str,
         changes: BoardChanges,
     ) -> BoardRecord:
-        with self._db.transaction() as cursor:
-            try:
+        try:
+            cursor = self._conn.cursor()
             # Change the name of a board
             if changes.board_name is not None:
                 cursor.execute(
                     """--sql
                     UPDATE boards
                     SET board_name = ?
                     WHERE board_id = ?;
                     """,
                     (changes.board_name, board_id),
                 )
 
             # Change the cover image of a board
             if changes.cover_image_name is not None:
                 cursor.execute(
                     """--sql
                     UPDATE boards
                     SET cover_image_name = ?
                     WHERE board_id = ?;
                     """,
                     (changes.cover_image_name, board_id),
                 )
 
             # Change the archived status of a board
             if changes.archived is not None:
                 cursor.execute(
                     """--sql
                     UPDATE boards
                     SET archived = ?
                     WHERE board_id = ?;
                     """,
                     (changes.archived, board_id),
                 )
 
-        except sqlite3.Error as e:
-            raise BoardRecordSaveException from e
+            self._conn.commit()
+        except sqlite3.Error as e:
+            self._conn.rollback()
+            raise BoardRecordSaveException from e
         return self.get(board_id)
 
     def get_many(
@@ -127,77 +133,78 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         limit: int = 10,
         include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardRecord]:
-        with self._db.transaction() as cursor:
+        cursor = self._conn.cursor()
+
         # Build base query
         base_query = """
            SELECT *
            FROM boards
            {archived_filter}
            ORDER BY {order_by} {direction}
            LIMIT ? OFFSET ?;
        """
 
         # Determine archived filter condition
         archived_filter = "" if include_archived else "WHERE archived = 0"
 
         final_query = base_query.format(
             archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
         )
 
         # Execute query to fetch boards
         cursor.execute(final_query, (limit, offset))
 
         result = cast(list[sqlite3.Row], cursor.fetchall())
         boards = [deserialize_board_record(dict(r)) for r in result]
 
         # Determine count query
         if include_archived:
             count_query = """
                SELECT COUNT(*)
                FROM boards;
            """
         else:
             count_query = """
                SELECT COUNT(*)
                FROM boards
                WHERE archived = 0;
            """
 
         # Execute count query
         cursor.execute(count_query)
 
         count = cast(int, cursor.fetchone()[0])
 
         return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)
 
     def get_all(
         self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
     ) -> list[BoardRecord]:
-        with self._db.transaction() as cursor:
+        cursor = self._conn.cursor()
         if order_by == BoardRecordOrderBy.Name:
             base_query = """
                SELECT *
                FROM boards
                {archived_filter}
                ORDER BY LOWER(board_name) {direction}
            """
         else:
             base_query = """
                SELECT *
                FROM boards
                {archived_filter}
                ORDER BY {order_by} {direction}
            """
 
         archived_filter = "" if include_archived else "WHERE archived = 0"
 
         final_query = base_query.format(
             archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
         )
 
         cursor.execute(final_query)
 
         result = cast(list[sqlite3.Row], cursor.fetchall())
         boards = [deserialize_board_record(dict(r)) for r in result]
 
         return boards
@@ -1,42 +0,0 @@
-from abc import ABC, abstractmethod
-
-
-class ClientStatePersistenceABC(ABC):
-    """
-    Base class for client persistence implementations.
-    This class defines the interface for persisting client data.
-    """
-
-    @abstractmethod
-    def set_by_key(self, queue_id: str, key: str, value: str) -> str:
-        """
-        Set a key-value pair for the client.
-
-        Args:
-            key (str): The key to set.
-            value (str): The value to set for the key.
-
-        Returns:
-            str: The value that was set.
-        """
-        pass
-
-    @abstractmethod
-    def get_by_key(self, queue_id: str, key: str) -> str | None:
-        """
-        Get the value for a specific key of the client.
-
-        Args:
-            key (str): The key to retrieve the value for.
-
-        Returns:
-            str | None: The value associated with the key, or None if the key does not exist.
-        """
-        pass
-
-    @abstractmethod
-    def delete(self, queue_id: str) -> None:
-        """
-        Delete all client state.
-        """
-        pass
@@ -1,65 +0,0 @@
-import json
-
-from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
-from invokeai.app.services.invoker import Invoker
-from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
-
-
-class ClientStatePersistenceSqlite(ClientStatePersistenceABC):
-    """
-    Base class for client persistence implementations.
-    This class defines the interface for persisting client data.
-    """
-
-    def __init__(self, db: SqliteDatabase) -> None:
-        super().__init__()
-        self._db = db
-        self._default_row_id = 1
-
-    def start(self, invoker: Invoker) -> None:
-        self._invoker = invoker
-
-    def _get(self) -> dict[str, str] | None:
-        with self._db.transaction() as cursor:
-            cursor.execute(
-                f"""
-                SELECT data FROM client_state
-                WHERE id = {self._default_row_id}
-                """
-            )
-            row = cursor.fetchone()
-        if row is None:
-            return None
-        return json.loads(row[0])
-
-    def set_by_key(self, queue_id: str, key: str, value: str) -> str:
-        state = self._get() or {}
-        state.update({key: value})
-
-        with self._db.transaction() as cursor:
-            cursor.execute(
-                f"""
-                INSERT INTO client_state (id, data)
-                VALUES ({self._default_row_id}, ?)
-                ON CONFLICT(id) DO UPDATE
-                SET data = excluded.data;
-                """,
-                (json.dumps(state),),
-            )
-
-        return value
-
-    def get_by_key(self, queue_id: str, key: str) -> str | None:
-        state = self._get()
-        if state is None:
-            return None
-        return state.get(key, None)
-
-    def delete(self, queue_id: str) -> None:
-        with self._db.transaction() as cursor:
-            cursor.execute(
-                f"""
-                DELETE FROM client_state
-                WHERE id = {self._default_row_id}
-                """
-            )
@@ -8,7 +8,6 @@ import time
import traceback
from pathlib import Path
from queue import Empty, PriorityQueue
-from shutil import disk_usage
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set

import requests
@@ -336,14 +335,6 @@ class DownloadQueueService(DownloadQueueServiceBase):

assert job.download_path

-free_space = disk_usage(job.download_path.parent).free
-GB = 2**30
-self._logger.debug(f"Download is {job.total_bytes / GB:.2f} GB of {free_space / GB:.2f} GB free.")
-if free_space < job.total_bytes:
-raise RuntimeError(
-f"Free disk space {free_space / GB:.2f} GB is not enough for download of {job.total_bytes / GB:.2f} GB."
-)
-
# Don't clobber an existing file. See commit 82c2c85202f88c6d24ff84710f297cfc6ae174af
# for code that instead resumes an interrupted download.
if job.download_path.exists():
@@ -24,22 +24,22 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteImageRecordStorage(ImageRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
-self._db = db
+self._conn = db.conn

def get(self, image_name: str) -> ImageRecord:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
cursor.execute(
f"""--sql
SELECT {IMAGE_DTO_COLS} FROM images
WHERE image_name = ?;
""",
(image_name,),
)

result = cast(Optional[sqlite3.Row], cursor.fetchone())
except sqlite3.Error as e:
raise ImageRecordNotFoundException from e

if not result:
raise ImageRecordNotFoundException
@@ -47,20 +47,17 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
return deserialize_image_record(dict(result))

def get_metadata(self, image_name: str) -> Optional[MetadataField]:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT metadata FROM images
WHERE image_name = ?;
""",
(image_name,),
)

result = cast(Optional[sqlite3.Row], cursor.fetchone())

-except sqlite3.Error as e:
-raise ImageRecordNotFoundException from e
-
if not result:
raise ImageRecordNotFoundException
@@ -68,60 +65,64 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
as_dict = dict(result)
metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None
+except sqlite3.Error as e:
+raise ImageRecordNotFoundException from e

def update(
self,
image_name: str,
changes: ImageRecordChanges,
) -> None:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
# Change the category of the image
if changes.image_category is not None:
cursor.execute(
"""--sql
UPDATE images
SET image_category = ?
WHERE image_name = ?;
""",
(changes.image_category, image_name),
)

# Change the session associated with the image
if changes.session_id is not None:
cursor.execute(
"""--sql
UPDATE images
SET session_id = ?
WHERE image_name = ?;
""",
(changes.session_id, image_name),
)

# Change the image's `is_intermediate`` flag
if changes.is_intermediate is not None:
cursor.execute(
"""--sql
UPDATE images
SET is_intermediate = ?
WHERE image_name = ?;
""",
(changes.is_intermediate, image_name),
)

# Change the image's `starred`` state
if changes.starred is not None:
cursor.execute(
"""--sql
UPDATE images
SET starred = ?
WHERE image_name = ?;
""",
(changes.starred, image_name),
)

-except sqlite3.Error as e:
-raise ImageRecordSaveException from e
+self._conn.commit()
+except sqlite3.Error as e:
+self._conn.rollback()
+raise ImageRecordSaveException from e

def get_many(
self,
@@ -135,162 +136,170 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> OffsetPaginatedResults[ImageRecord]:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
# Manually build two queries - one for the count, one for the records
count_query = """--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""

images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""

query_conditions = ""
query_params: list[Union[int, str, bool]] = []

if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)

if categories is not None:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))

query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""

# Unpack the included categories into the query params
for c in category_strings:
query_params.append(c)

if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""

query_params.append(is_intermediate)

# board_id of "none" is reserved for images without a board
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)

# Search term condition
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")

if starred_first:
query_pagination = f"""--sql
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
else:
query_pagination = f"""--sql
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""

# Final images query with pagination
images_query += query_conditions + query_pagination + ";"
# Add all the parameters
images_params = query_params.copy()
# Add the pagination parameters
images_params.extend([limit, offset])

# Build the list of images, deserializing each row
cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], cursor.fetchall())

images = [deserialize_image_record(dict(r)) for r in result]

# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
count_params = query_params.copy()
cursor.execute(count_query, count_params)
count = cast(int, cursor.fetchone()[0])

return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)

def delete(self, image_name: str) -> None:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
DELETE FROM images
WHERE image_name = ?;
""",
(image_name,),
)
+self._conn.commit()
except sqlite3.Error as e:
+self._conn.rollback()
raise ImageRecordDeleteException from e

def delete_many(self, image_names: list[str]) -> None:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
+
placeholders = ",".join("?" for _ in image_names)

# Construct the SQLite query with the placeholders
query = f"DELETE FROM images WHERE image_name IN ({placeholders})"

# Execute the query with the list of IDs as parameters
cursor.execute(query, image_names)

+self._conn.commit()
except sqlite3.Error as e:
+self._conn.rollback()
raise ImageRecordDeleteException from e

def get_intermediates_count(self) -> int:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images
WHERE is_intermediate = TRUE;
"""
)
count = cast(int, cursor.fetchone()[0])
+self._conn.commit()
return count

def delete_intermediates(self) -> list[str]:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT image_name FROM images
WHERE is_intermediate = TRUE;
"""
)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [r[0] for r in result]
cursor.execute(
"""--sql
DELETE FROM images
WHERE is_intermediate = TRUE;
"""
)
+self._conn.commit()
+return image_names
except sqlite3.Error as e:
+self._conn.rollback()
raise ImageRecordDeleteException from e
-return image_names

def save(
self,
@@ -306,71 +315,73 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
node_id: Optional[str] = None,
metadata: Optional[str] = None,
) -> datetime:
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
INSERT OR IGNORE INTO images (
image_name,
image_origin,
image_category,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
""",
(
image_name,
image_origin.value,
image_category.value,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow,
),
)
+self._conn.commit()

cursor.execute(
"""--sql
SELECT created_at
FROM images
WHERE image_name = ?;
""",
(image_name,),
)

created_at = datetime.fromisoformat(cursor.fetchone()[0])

+return created_at
except sqlite3.Error as e:
+self._conn.rollback()
raise ImageRecordSaveException from e
-return created_at

def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT images.*
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",
(board_id,),
)

result = cast(Optional[sqlite3.Row], cursor.fetchone())

if result is None:
return None
@@ -387,84 +398,85 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()

# Build query conditions (reused for both starred count and image names queries)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []

if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)

if categories is not None:
category_strings = [c.value for c in set(categories)]
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
for c in category_strings:
query_params.append(c)

if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
query_params.append(is_intermediate)

if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)

if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")

# Get starred count if starred_first is enabled
starred_count = 0
if starred_first:
starred_count_query = f"""--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE images.starred = TRUE AND (1=1{query_conditions})
"""
cursor.execute(starred_count_query, query_params)
starred_count = cast(int, cursor.fetchone()[0])

# Get all image names with proper ordering
if starred_first:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.starred DESC, images.created_at {order_dir.value}
"""
else:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.created_at {order_dir.value}
"""

cursor.execute(names_query, query_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [row[0] for row in result]

return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names))
@@ -17,7 +17,6 @@ if TYPE_CHECKING:
from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
from invokeai.app.services.boards.boards_base import BoardServiceABC
from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
-from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueServiceBase
from invokeai.app.services.events.events_base import EventServiceBase
@@ -74,7 +73,6 @@ class InvocationServices:
style_preset_records: "StylePresetRecordsStorageBase",
style_preset_image_files: "StylePresetImageFileStorageBase",
workflow_thumbnails: "WorkflowThumbnailServiceBase",
-client_state_persistence: "ClientStatePersistenceABC",
):
self.board_images = board_images
self.board_image_records = board_image_records
@@ -104,4 +102,3 @@ class InvocationServices:
self.style_preset_records = style_preset_records
self.style_preset_image_files = style_preset_image_files
self.workflow_thumbnails = workflow_thumbnails
-self.client_state_persistence = client_state_persistence
@@ -7,7 +7,7 @@ import threading
import time
from pathlib import Path
from queue import Empty, Queue
-from shutil import move, rmtree
+from shutil import copyfile, copytree, move, rmtree
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union

@@ -51,7 +51,6 @@ from invokeai.backend.model_manager.metadata import (
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
-from invokeai.backend.model_manager.util.lora_metadata_extractor import apply_lora_metadata
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice
@@ -186,14 +185,13 @@ class ModelInstallService(ModelInstallServiceBase):
info: AnyModelConfig = self._probe(Path(model_path), config) # type: ignore

if preferred_name := config.name:
-# Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
-preferred_name = f"{preferred_name}{model_path.suffix}"
+preferred_name = Path(preferred_name).with_suffix(model_path.suffix)

dest_path = (
self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
)
try:
-new_path = self._move_model(model_path, dest_path)
+new_path = self._copy_model(model_path, dest_path)
except FileExistsError as excp:
raise DuplicateModelException(
f"A model named {model_path.name} is already installed at {dest_path.as_posix()}"
@@ -618,6 +616,16 @@ class ModelInstallService(ModelInstallServiceBase):
self.record_store.update_model(key, ModelRecordChanges(path=model.path))
return model

+def _copy_model(self, old_path: Path, new_path: Path) -> Path:
+if old_path == new_path:
+return old_path
+new_path.parent.mkdir(parents=True, exist_ok=True)
+if old_path.is_dir():
+copytree(old_path, new_path)
+else:
+copyfile(old_path, new_path)
+return new_path
+
def _move_model(self, old_path: Path, new_path: Path) -> Path:
if old_path == new_path:
return old_path
@@ -659,10 +667,6 @@ class ModelInstallService(ModelInstallServiceBase):

info = info or self._probe(model_path, config)

-# Apply LoRA metadata if applicable
-model_images_path = self.app_config.models_path / "model_images"
-apply_lora_metadata(info, model_path.resolve(), model_images_path)
-
model_path = model_path.resolve()

# Models in the Invoke-managed models dir should use relative paths.
@@ -78,6 +78,11 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
self._db = db
self._logger = logger

+@property
+def db(self) -> SqliteDatabase:
+"""Return the underlying database."""
+return self._db
+
def add_model(self, config: AnyModelConfig) -> AnyModelConfig:
"""
Add a model to the database.
@@ -88,33 +93,38 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

Can raise DuplicateModelException and InvalidModelConfigException exceptions.
"""
-with self._db.transaction() as cursor:
-try:
+try:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
INSERT INTO models (
id,
config
)
VALUES (?,?);
""",
(
config.key,
config.model_dump_json(),
),
)
+self._db.conn.commit()

except sqlite3.IntegrityError as e:
+self._db.conn.rollback()
if "UNIQUE constraint failed" in str(e):
if "models.path" in str(e):
msg = f"A model with path '{config.path}' is already installed"
elif "models.name" in str(e):
msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
else:
msg = f"A model with key '{config.key}' is already installed"
raise DuplicateModelException(msg) from e
else:
raise e
+except sqlite3.Error as e:
+self._db.conn.rollback()
+raise e

return self.get_model(config.key)
@@ -126,7 +136,8 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

Can raise an UnknownModelException
"""
-with self._db.transaction() as cursor:
+try:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
DELETE FROM models
@@ -136,17 +147,22 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
+self._db.conn.commit()
+except sqlite3.Error as e:
+self._db.conn.rollback()
+raise e

def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig:
-with self._db.transaction() as cursor:
record = self.get_model(key)

# Model configs use pydantic's `validate_assignment`, so each change is validated by pydantic.
for field_name in changes.model_fields_set:
setattr(record, field_name, getattr(changes, field_name))

json_serialized = record.model_dump_json()

+try:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
UPDATE models
@@ -158,6 +174,10 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
+self._db.conn.commit()
+except sqlite3.Error as e:
+self._db.conn.rollback()
+raise e

return self.get_model(key)

@@ -169,30 +189,30 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

Exceptions: UnknownModelException
"""
-with self._db.transaction() as cursor:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE id=?;
""",
(key,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
return model

def get_model_by_hash(self, hash: str) -> AnyModelConfig:
-with self._db.transaction() as cursor:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
@@ -204,15 +224,15 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

:param key: Unique key for the model to be deleted
"""
-with self._db.transaction() as cursor:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
select count(*) FROM models
WHERE id=?;
""",
(key,),
)
count = cursor.fetchone()[0]
return count > 0

def search_by_attr(
@@ -235,42 +255,43 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
If none of the optional filters are passed, will return all
models in the database.
"""
-with self._db.transaction() as cursor:
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}

where_clause: list[str] = []
bindings: list[str] = []
if model_name:
where_clause.append("name=?")
bindings.append(model_name)
if base_model:
where_clause.append("base=?")
bindings.append(base_model)
if model_type:
where_clause.append("type=?")
bindings.append(model_type)
if model_format:
where_clause.append("format=?")
bindings.append(model_format)
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""

+cursor = self._db.conn.cursor()
cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
""",
tuple(bindings),
)
result = cursor.fetchall()

# Parse the model configs.
results: list[AnyModelConfig] = []
@@ -292,68 +313,69 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
"""Return models with the indicated path."""
-with self._db.transaction() as cursor:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
return results

def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
"""Return models with the indicated hash."""
-with self._db.transaction() as cursor:
+cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
return results

def list_models(
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
) -> PaginatedResults[ModelSummary]:
"""Return a paginated summary listing of each model in the database."""
-with self._db.transaction() as cursor:
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}

+cursor = self._db.conn.cursor()
+
# Lock so that the database isn't updated while we're doing the two queries.
# query1: get the total number of model configs
cursor.execute(
"""--sql
select count(*) from models;
""",
(),
)
total = int(cursor.fetchone()[0])

# query2: fetch key fields
cursor.execute(
f"""--sql
SELECT config
FROM models
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
LIMIT ?
OFFSET ?;
""",
(
per_page,
page * per_page,
),
)
rows = cursor.fetchall()
items = [ModelSummary.model_validate(dict(x)) for x in rows]
return PaginatedResults(page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items)
@@ -1,3 +1,5 @@
+import sqlite3
+
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
ModelRelationshipRecordStorageBase,
)
@@ -7,49 +9,58 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteModelRelationshipRecordStorage(ModelRelationshipRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
-self._db = db
+self._conn = db.conn

def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
-with self._db.transaction() as cursor:
if model_key_1 == model_key_2:
raise ValueError("Cannot relate a model to itself.")
a, b = sorted([model_key_1, model_key_2])
+try:
+cursor = self._conn.cursor()
cursor.execute(
"INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
(a, b),
)
+self._conn.commit()
+except sqlite3.Error as e:
+self._conn.rollback()
+raise e

def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
-with self._db.transaction() as cursor:
a, b = sorted([model_key_1, model_key_2])
+try:
+cursor = self._conn.cursor()
cursor.execute(
"DELETE FROM model_relationships WHERE model_key_1 = ? AND model_key_2 = ?",
(a, b),
)
+self._conn.commit()
+except sqlite3.Error as e:
+self._conn.rollback()
+raise e

def get_related_model_keys(self, model_key: str) -> list[str]:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
cursor.execute(
"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
""",
(model_key, model_key),
)
-result = [row[0] for row in cursor.fetchall()]
-return result
+return [row[0] for row in cursor.fetchall()]

def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
key_list = ",".join("?" for _ in model_keys)
cursor.execute(
f"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
""",
model_keys + model_keys,
)
-result = [row[0] for row in cursor.fetchall()]
-return result
+return [row[0] for row in cursor.fetchall()]
@@ -50,14 +50,15 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
|
|
||||||
def __init__(self, db: SqliteDatabase) -> None:
|
def __init__(self, db: SqliteDatabase) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self._db = db
|
self._conn = db.conn
|
||||||
|
|
||||||
def _set_in_progress_to_canceled(self) -> None:
|
def _set_in_progress_to_canceled(self) -> None:
|
||||||
"""
|
"""
|
||||||
Sets all in_progress queue items to canceled. Run on app startup, not associated with any queue.
|
Sets all in_progress queue items to canceled. Run on app startup, not associated with any queue.
|
||||||
This is necessary because the invoker may have been killed while processing a queue item.
|
This is necessary because the invoker may have been killed while processing a queue item.
|
||||||
"""
|
"""
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
UPDATE session_queue
|
UPDATE session_queue
|
||||||
@@ -65,79 +66,87 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
WHERE status = 'in_progress';
|
WHERE status = 'in_progress';
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
|
|
||||||
def _get_current_queue_size(self, queue_id: str) -> int:
|
def _get_current_queue_size(self, queue_id: str) -> int:
|
||||||
"""Gets the current number of pending queue items"""
|
"""Gets the current number of pending queue items"""
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT count(*)
|
SELECT count(*)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND status = 'pending'
|
AND status = 'pending'
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
count = cast(int, cursor.fetchone()[0])
|
return cast(int, cursor.fetchone()[0])
|
||||||
return count
|
|
||||||
|
|
||||||
def _get_highest_priority(self, queue_id: str) -> int:
|
def _get_highest_priority(self, queue_id: str) -> int:
|
||||||
"""Gets the highest priority value in the queue"""
|
"""Gets the highest priority value in the queue"""
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT MAX(priority)
|
SELECT MAX(priority)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND status = 'pending'
|
AND status = 'pending'
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
priority = cast(Union[int, None], cursor.fetchone()[0]) or 0
|
return cast(Union[int, None], cursor.fetchone()[0]) or 0
|
||||||
return priority
|
|
||||||
|
|
||||||
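_get_highest_priority feeds the prepend path of enqueue_batch below: a prepended batch is inserted with MAX(priority) + 1, and because dequeue orders by priority DESC, item_id ASC, it runs before everything already pending. A runnable sketch of that interaction, assuming a session_queue cut down to just the columns involved:

# Illustrative sketch only; the real table has many more columns.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE session_queue (item_id INTEGER PRIMARY KEY, priority INTEGER, status TEXT)")
conn.executemany("INSERT INTO session_queue (priority, status) VALUES (?, 'pending')", [(0,), (0,)])

# "prepend": one above the highest pending priority
highest = conn.execute(
    "SELECT MAX(priority) FROM session_queue WHERE status = 'pending'"
).fetchone()[0] or 0
conn.execute("INSERT INTO session_queue (priority, status) VALUES (?, 'pending')", (highest + 1,))

next_item = conn.execute(
    "SELECT item_id, priority FROM session_queue WHERE status = 'pending' "
    "ORDER BY priority DESC, item_id ASC LIMIT 1"
).fetchone()
print(next_item)  # (3, 1) - the prepended item dequeues before the earlier ones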
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
||||||
current_queue_size = self._get_current_queue_size(queue_id)
|
try:
|
||||||
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
# TODO: how does this work in a multi-user scenario?
|
||||||
max_new_queue_items = max_queue_size - current_queue_size
|
current_queue_size = self._get_current_queue_size(queue_id)
|
||||||
|
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
||||||
|
max_new_queue_items = max_queue_size - current_queue_size
|
||||||
|
|
||||||
priority = 0
|
priority = 0
|
||||||
if prepend:
|
if prepend:
|
||||||
priority = self._get_highest_priority(queue_id) + 1
|
priority = self._get_highest_priority(queue_id) + 1
|
||||||
|
|
||||||
requested_count = await asyncio.to_thread(
|
requested_count = await asyncio.to_thread(
|
||||||
calc_session_count,
|
calc_session_count,
|
||||||
batch=batch,
|
batch=batch,
|
||||||
)
|
)
|
||||||
values_to_insert = await asyncio.to_thread(
|
values_to_insert = await asyncio.to_thread(
|
||||||
prepare_values_to_insert,
|
prepare_values_to_insert,
|
||||||
queue_id=queue_id,
|
queue_id=queue_id,
|
||||||
batch=batch,
|
batch=batch,
|
||||||
priority=priority,
|
priority=priority,
|
||||||
max_new_queue_items=max_new_queue_items,
|
max_new_queue_items=max_new_queue_items,
|
||||||
)
|
)
|
||||||
enqueued_count = len(values_to_insert)
|
enqueued_count = len(values_to_insert)
|
||||||
|
|
||||||
with self._db.transaction() as cursor:
|
with self._conn:
|
||||||
cursor.executemany(
|
cursor = self._conn.cursor()
|
||||||
"""--sql
|
cursor.executemany(
|
||||||
|
"""--sql
|
||||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
""",
|
""",
|
||||||
values_to_insert,
|
values_to_insert,
|
||||||
)
|
)
|
||||||
cursor.execute(
|
with self._conn:
|
||||||
"""--sql
|
cursor = self._conn.cursor()
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
SELECT item_id
|
SELECT item_id
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE batch_id = ?
|
WHERE batch_id = ?
|
||||||
ORDER BY item_id DESC;
|
ORDER BY item_id DESC;
|
||||||
""",
|
""",
|
||||||
(batch.batch_id,),
|
(batch.batch_id,),
|
||||||
)
|
)
|
||||||
item_ids = [row[0] for row in cursor.fetchall()]
|
item_ids = [row[0] for row in cursor.fetchall()]
|
||||||
|
except Exception:
|
||||||
|
raise
|
||||||
enqueue_result = EnqueueBatchResult(
|
enqueue_result = EnqueueBatchResult(
|
||||||
queue_id=queue_id,
|
queue_id=queue_id,
|
||||||
requested=requested_count,
|
requested=requested_count,
|
||||||
@@ -150,19 +159,19 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
return enqueue_result
|
return enqueue_result
|
||||||
|
|
||||||
def dequeue(self) -> Optional[SessionQueueItem]:
|
def dequeue(self) -> Optional[SessionQueueItem]:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE status = 'pending'
|
WHERE status = 'pending'
|
||||||
ORDER BY
|
ORDER BY
|
||||||
priority DESC,
|
priority DESC,
|
||||||
item_id ASC
|
item_id ASC
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||||
if result is None:
|
if result is None:
|
||||||
return None
|
return None
|
||||||
queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
|
queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
|
||||||
@@ -170,40 +179,40 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
return queue_item
|
return queue_item
|
||||||
|
|
||||||
def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
|
def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND status = 'pending'
|
AND status = 'pending'
|
||||||
ORDER BY
|
ORDER BY
|
||||||
priority DESC,
|
priority DESC,
|
||||||
created_at ASC
|
created_at ASC
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||||
if result is None:
|
if result is None:
|
||||||
return None
|
return None
|
||||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||||
|
|
||||||
def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
|
def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND status = 'in_progress'
|
AND status = 'in_progress'
|
||||||
LIMIT 1
|
LIMIT 1
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||||
if result is None:
|
if result is None:
|
||||||
return None
|
return None
|
||||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||||
@@ -216,7 +225,8 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
error_message: Optional[str] = None,
|
error_message: Optional[str] = None,
|
||||||
error_traceback: Optional[str] = None,
|
error_traceback: Optional[str] = None,
|
||||||
) -> SessionQueueItem:
|
) -> SessionQueueItem:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT status FROM session_queue WHERE item_id = ?
|
SELECT status FROM session_queue WHERE item_id = ?
|
||||||
@@ -224,15 +234,12 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
(item_id,),
|
(item_id,),
|
||||||
)
|
)
|
||||||
row = cursor.fetchone()
|
row = cursor.fetchone()
|
||||||
if row is None:
|
if row is None:
|
||||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||||
current_status = row[0]
|
current_status = row[0]
|
||||||
|
# Only update if not already finished (completed, failed or canceled)
|
||||||
# Only update if not already finished (completed, failed or canceled)
|
if current_status in ("completed", "failed", "canceled"):
|
||||||
if current_status in ("completed", "failed", "canceled"):
|
return self.get_queue_item(item_id)
|
||||||
return self.get_queue_item(item_id)
|
|
||||||
|
|
||||||
with self._db.transaction() as cursor:
|
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
UPDATE session_queue
|
UPDATE session_queue
|
||||||
@@ -241,7 +248,10 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(status, error_type, error_message, error_traceback, item_id),
|
(status, error_type, error_message, error_traceback, item_id),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
queue_item = self.get_queue_item(item_id)
|
queue_item = self.get_queue_item(item_id)
|
||||||
batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
|
batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
|
||||||
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
|
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
|
||||||
@@ -249,34 +259,35 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
return queue_item
|
return queue_item
|
||||||
|
|
||||||
def is_empty(self, queue_id: str) -> IsEmptyResult:
|
def is_empty(self, queue_id: str) -> IsEmptyResult:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT count(*)
|
SELECT count(*)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
is_empty = cast(int, cursor.fetchone()[0]) == 0
|
is_empty = cast(int, cursor.fetchone()[0]) == 0
|
||||||
return IsEmptyResult(is_empty=is_empty)
|
return IsEmptyResult(is_empty=is_empty)
|
||||||
|
|
||||||
def is_full(self, queue_id: str) -> IsFullResult:
|
def is_full(self, queue_id: str) -> IsFullResult:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT count(*)
|
SELECT count(*)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
||||||
is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
|
is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
|
||||||
return IsFullResult(is_full=is_full)
|
return IsFullResult(is_full=is_full)
|
||||||
|
|
||||||
def clear(self, queue_id: str) -> ClearResult:
|
def clear(self, queue_id: str) -> ClearResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT COUNT(*)
|
SELECT COUNT(*)
|
||||||
@@ -294,19 +305,24 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
self.__invoker.services.events.emit_queue_cleared(queue_id)
|
self.__invoker.services.events.emit_queue_cleared(queue_id)
|
||||||
return ClearResult(deleted=count)
|
return ClearResult(deleted=count)
|
||||||
|
|
||||||
def prune(self, queue_id: str) -> PruneResult:
|
def prune(self, queue_id: str) -> PruneResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
where = """--sql
|
where = """--sql
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND (
|
AND (
|
||||||
status = 'completed'
|
status = 'completed'
|
||||||
OR status = 'failed'
|
OR status = 'failed'
|
||||||
OR status = 'canceled'
|
OR status = 'canceled'
|
||||||
)
|
)
|
||||||
"""
|
"""
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
f"""--sql
|
f"""--sql
|
||||||
@@ -325,6 +341,10 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return PruneResult(deleted=count)
|
return PruneResult(deleted=count)
|
||||||
|
|
||||||
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
|
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||||
@@ -337,7 +357,8 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
self.cancel_queue_item(item_id)
|
self.cancel_queue_item(item_id)
|
||||||
except SessionQueueItemNotFoundError:
|
except SessionQueueItemNotFoundError:
|
||||||
pass
|
pass
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
DELETE
|
DELETE
|
||||||
@@ -346,6 +367,10 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(item_id,),
|
(item_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
|
|
||||||
def complete_queue_item(self, item_id: int) -> SessionQueueItem:
|
def complete_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||||
queue_item = self._set_queue_item_status(item_id=item_id, status="completed")
|
queue_item = self._set_queue_item_status(item_id=item_id, status="completed")
|
||||||
@@ -368,7 +393,8 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
return queue_item
|
return queue_item
|
||||||
|
|
||||||
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
|
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
current_queue_item = self.get_current(queue_id)
|
current_queue_item = self.get_current(queue_id)
|
||||||
placeholders = ", ".join(["?" for _ in batch_ids])
|
placeholders = ", ".join(["?" for _ in batch_ids])
|
||||||
where = f"""--sql
|
where = f"""--sql
|
||||||
@@ -399,14 +425,17 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
tuple(params),
|
tuple(params),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
|
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
|
||||||
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return CancelByBatchIDsResult(canceled=count)
|
return CancelByBatchIDsResult(canceled=count)
|
||||||
|
|
||||||
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
|
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
current_queue_item = self.get_current(queue_id)
|
current_queue_item = self.get_current(queue_id)
|
||||||
where = """--sql
|
where = """--sql
|
||||||
WHERE
|
WHERE
|
||||||
@@ -436,12 +465,17 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
params,
|
params,
|
||||||
)
|
)
|
||||||
if current_queue_item is not None and current_queue_item.destination == destination:
|
self._conn.commit()
|
||||||
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
if current_queue_item is not None and current_queue_item.destination == destination:
|
||||||
|
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return CancelByDestinationResult(canceled=count)
|
return CancelByDestinationResult(canceled=count)
|
||||||
|
|
||||||
def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
|
def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
current_queue_item = self.get_current(queue_id)
|
current_queue_item = self.get_current(queue_id)
|
||||||
if current_queue_item is not None and current_queue_item.destination == destination:
|
if current_queue_item is not None and current_queue_item.destination == destination:
|
||||||
self.cancel_queue_item(current_queue_item.item_id)
|
self.cancel_queue_item(current_queue_item.item_id)
|
||||||
@@ -467,10 +501,15 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
params,
|
params,
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return DeleteByDestinationResult(deleted=count)
|
return DeleteByDestinationResult(deleted=count)
|
||||||
|
|
||||||
def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
|
def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
where = """--sql
|
where = """--sql
|
||||||
WHERE
|
WHERE
|
||||||
queue_id == ?
|
queue_id == ?
|
||||||
@@ -493,10 +532,15 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return DeleteAllExceptCurrentResult(deleted=count)
|
return DeleteAllExceptCurrentResult(deleted=count)
|
||||||
|
|
||||||
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
|
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
current_queue_item = self.get_current(queue_id)
|
current_queue_item = self.get_current(queue_id)
|
||||||
where = """--sql
|
where = """--sql
|
||||||
WHERE
|
WHERE
|
||||||
@@ -525,13 +569,18 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
tuple(params),
|
tuple(params),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
|
||||||
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
|
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
|
||||||
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
self._set_queue_item_status(current_queue_item.item_id, "canceled")
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return CancelByQueueIDResult(canceled=count)
|
return CancelByQueueIDResult(canceled=count)
|
||||||
|
|
||||||
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
|
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
where = """--sql
|
where = """--sql
|
||||||
WHERE
|
WHERE
|
||||||
queue_id == ?
|
queue_id == ?
|
||||||
@@ -554,25 +603,30 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return CancelAllExceptCurrentResult(canceled=count)
|
return CancelAllExceptCurrentResult(canceled=count)
|
||||||
|
|
||||||
def get_queue_item(self, item_id: int) -> SessionQueueItem:
|
def get_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT * FROM session_queue
|
SELECT * FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
item_id = ?
|
item_id = ?
|
||||||
""",
|
""",
|
||||||
(item_id,),
|
(item_id,),
|
||||||
)
|
)
|
||||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||||
if result is None:
|
if result is None:
|
||||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||||
|
|
||||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
||||||
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
||||||
# during execution.
|
# during execution.
|
||||||
@@ -585,6 +639,10 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
""",
|
""",
|
||||||
(session_json, item_id),
|
(session_json, item_id),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return self.get_queue_item(item_id)
|
return self.get_queue_item(item_id)
|
||||||
|
|
||||||
def list_queue_items(
|
def list_queue_items(
|
||||||
@@ -596,42 +654,42 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
status: Optional[QUEUE_ITEM_STATUS] = None,
|
status: Optional[QUEUE_ITEM_STATUS] = None,
|
||||||
destination: Optional[str] = None,
|
destination: Optional[str] = None,
|
||||||
) -> CursorPaginatedResults[SessionQueueItem]:
|
) -> CursorPaginatedResults[SessionQueueItem]:
|
||||||
with self._db.transaction() as cursor_:
|
cursor_ = self._conn.cursor()
|
||||||
item_id = cursor
|
item_id = cursor
|
||||||
query = """--sql
|
query = """--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
"""
|
"""
|
||||||
params: list[Union[str, int]] = [queue_id]
|
params: list[Union[str, int]] = [queue_id]
|
||||||
|
|
||||||
if status is not None:
|
|
||||||
query += """--sql
|
|
||||||
AND status = ?
|
|
||||||
"""
|
|
||||||
params.append(status)
|
|
||||||
|
|
||||||
if destination is not None:
|
|
||||||
query += """---sql
|
|
||||||
AND destination = ?
|
|
||||||
"""
|
|
||||||
params.append(destination)
|
|
||||||
|
|
||||||
if item_id is not None:
|
|
||||||
query += """--sql
|
|
||||||
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
|
||||||
"""
|
|
||||||
params.extend([priority, priority, item_id])
|
|
||||||
|
|
||||||
|
if status is not None:
|
||||||
query += """--sql
|
query += """--sql
|
||||||
ORDER BY
|
AND status = ?
|
||||||
priority DESC,
|
|
||||||
item_id ASC
|
|
||||||
LIMIT ?
|
|
||||||
"""
|
"""
|
||||||
params.append(limit + 1)
|
params.append(status)
|
||||||
cursor_.execute(query, params)
|
|
||||||
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
if destination is not None:
|
||||||
|
query += """---sql
|
||||||
|
AND destination = ?
|
||||||
|
"""
|
||||||
|
params.append(destination)
|
||||||
|
|
||||||
|
if item_id is not None:
|
||||||
|
query += """--sql
|
||||||
|
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
||||||
|
"""
|
||||||
|
params.extend([priority, priority, item_id])
|
||||||
|
|
||||||
|
query += """--sql
|
||||||
|
ORDER BY
|
||||||
|
priority DESC,
|
||||||
|
item_id ASC
|
||||||
|
LIMIT ?
|
||||||
|
"""
|
||||||
|
params.append(limit + 1)
|
||||||
|
cursor_.execute(query, params)
|
||||||
|
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
||||||
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
||||||
has_more = False
|
has_more = False
|
||||||
if len(items) > limit:
|
if len(items) > limit:
|
||||||
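The LIMIT bound to limit + 1 above is what drives the has_more flag: if the query returns more rows than the page size, another page exists and the extra row is dropped. A small sketch of that check, with illustrative names only:

# Sketch of the limit-plus-one pagination pattern used by list_queue_items.
def paginate(rows: list[int], limit: int) -> tuple[list[int], bool]:
    # rows plays the role of cursor_.fetchall() with LIMIT ? bound to limit + 1
    page = rows[: limit + 1]
    has_more = len(page) > limit
    return page[:limit], has_more

items, has_more = paginate(list(range(25)), limit=10)
print(len(items), has_more)  # 10 True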
@@ -646,43 +704,43 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
destination: Optional[str] = None,
|
destination: Optional[str] = None,
|
||||||
) -> list[SessionQueueItem]:
|
) -> list[SessionQueueItem]:
|
||||||
"""Gets all queue items that match the given parameters"""
|
"""Gets all queue items that match the given parameters"""
|
||||||
with self._db.transaction() as cursor:
|
cursor_ = self._conn.cursor()
|
||||||
query = """--sql
|
query = """--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
|
"""
|
||||||
|
params: list[Union[str, int]] = [queue_id]
|
||||||
|
|
||||||
|
if destination is not None:
|
||||||
|
query += """---sql
|
||||||
|
AND destination = ?
|
||||||
"""
|
"""
|
||||||
params: list[Union[str, int]] = [queue_id]
|
params.append(destination)
|
||||||
|
|
||||||
if destination is not None:
|
query += """--sql
|
||||||
query += """---sql
|
ORDER BY
|
||||||
AND destination = ?
|
priority DESC,
|
||||||
"""
|
item_id ASC
|
||||||
params.append(destination)
|
;
|
||||||
|
"""
|
||||||
query += """--sql
|
cursor_.execute(query, params)
|
||||||
ORDER BY
|
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
||||||
priority DESC,
|
|
||||||
item_id ASC
|
|
||||||
;
|
|
||||||
"""
|
|
||||||
cursor.execute(query, params)
|
|
||||||
results = cast(list[sqlite3.Row], cursor.fetchall())
|
|
||||||
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
||||||
return items
|
return items
|
||||||
|
|
||||||
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
|
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT status, count(*)
|
SELECT status, count(*)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
GROUP BY status
|
GROUP BY status
|
||||||
""",
|
""",
|
||||||
(queue_id,),
|
(queue_id,),
|
||||||
)
|
)
|
||||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||||
|
|
||||||
current_item = self.get_current(queue_id=queue_id)
|
current_item = self.get_current(queue_id=queue_id)
|
||||||
total = sum(row[1] or 0 for row in counts_result)
|
total = sum(row[1] or 0 for row in counts_result)
|
||||||
@@ -701,19 +759,19 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
|
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT status, count(*), origin, destination
|
SELECT status, count(*), origin, destination
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE
|
WHERE
|
||||||
queue_id = ?
|
queue_id = ?
|
||||||
AND batch_id = ?
|
AND batch_id = ?
|
||||||
GROUP BY status
|
GROUP BY status
|
||||||
""",
|
""",
|
||||||
(queue_id, batch_id),
|
(queue_id, batch_id),
|
||||||
)
|
)
|
||||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||||
total = sum(row[1] or 0 for row in result)
|
total = sum(row[1] or 0 for row in result)
|
||||||
counts: dict[str, int] = {row[0]: row[1] for row in result}
|
counts: dict[str, int] = {row[0]: row[1] for row in result}
|
||||||
origin = result[0]["origin"] if result else None
|
origin = result[0]["origin"] if result else None
|
||||||
@@ -733,18 +791,18 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
|
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT status, count(*)
|
SELECT status, count(*)
|
||||||
FROM session_queue
|
FROM session_queue
|
||||||
WHERE queue_id = ?
|
WHERE queue_id = ?
|
||||||
AND destination = ?
|
AND destination = ?
|
||||||
GROUP BY status
|
GROUP BY status
|
||||||
""",
|
""",
|
||||||
(queue_id, destination),
|
(queue_id, destination),
|
||||||
)
|
)
|
||||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||||
|
|
||||||
total = sum(row[1] or 0 for row in counts_result)
|
total = sum(row[1] or 0 for row in counts_result)
|
||||||
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
|
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
|
||||||
@@ -762,7 +820,8 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
|
|
||||||
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
|
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
|
||||||
"""Retries the given queue items"""
|
"""Retries the given queue items"""
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
values_to_insert: list[ValueToInsertTuple] = []
|
values_to_insert: list[ValueToInsertTuple] = []
|
||||||
retried_item_ids: list[int] = []
|
retried_item_ids: list[int] = []
|
||||||
|
|
||||||
@@ -813,6 +872,10 @@ class SqliteSessionQueue(SessionQueueBase):
|
|||||||
values_to_insert,
|
values_to_insert,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
retry_result = RetryItemsResult(
|
retry_result = RetryItemsResult(
|
||||||
queue_id=queue_id,
|
queue_id=queue_id,
|
||||||
retried_item_ids=retried_item_ids,
|
retried_item_ids=retried_item_ids,
|
||||||
|
|||||||
@@ -1,7 +1,4 @@
import sqlite3
- import threading
- from collections.abc import Generator
- from contextlib import contextmanager
from logging import Logger
from pathlib import Path

@@ -29,65 +26,46 @@ class SqliteDatabase:

def __init__(self, db_path: Path | None, logger: Logger, verbose: bool = False) -> None:
"""Initializes the database. This is used internally by the class constructor."""
- self._logger = logger
- self._db_path = db_path
- self._verbose = verbose
- self._lock = threading.RLock()
+ self.logger = logger
+ self.db_path = db_path
+ self.verbose = verbose

- if not self._db_path:
+ if not self.db_path:
logger.info("Initializing in-memory database")
else:
- self._db_path.parent.mkdir(parents=True, exist_ok=True)
- self._logger.info(f"Initializing database at {self._db_path}")
+ self.db_path.parent.mkdir(parents=True, exist_ok=True)
+ self.logger.info(f"Initializing database at {self.db_path}")

- self._conn = sqlite3.connect(database=self._db_path or sqlite_memory, check_same_thread=False)
- self._conn.row_factory = sqlite3.Row
+ self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
+ self.conn.row_factory = sqlite3.Row

- if self._verbose:
- self._conn.set_trace_callback(self._logger.debug)
+ if self.verbose:
+ self.conn.set_trace_callback(self.logger.debug)

# Enable foreign key constraints
- self._conn.execute("PRAGMA foreign_keys = ON;")
+ self.conn.execute("PRAGMA foreign_keys = ON;")

# Enable Write-Ahead Logging (WAL) mode for better concurrency
- self._conn.execute("PRAGMA journal_mode = WAL;")
+ self.conn.execute("PRAGMA journal_mode = WAL;")

# Set a busy timeout to prevent database lockups during writes
- self._conn.execute("PRAGMA busy_timeout = 5000;") # 5 seconds
+ self.conn.execute("PRAGMA busy_timeout = 5000;") # 5 seconds

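Both sides of this hunk configure the connection the same way; only the attribute names change. A standalone sketch of what those three PRAGMAs do, using only the sqlite3 stdlib module:

# Illustrative sketch, not the class above.
import sqlite3

conn = sqlite3.connect(":memory:", check_same_thread=False)
conn.row_factory = sqlite3.Row               # rows behave like mappings: row["column"]
conn.execute("PRAGMA foreign_keys = ON;")    # enforce FK constraints (off by default in SQLite)
conn.execute("PRAGMA journal_mode = WAL;")   # readers don't block the writer (file-backed DBs)
conn.execute("PRAGMA busy_timeout = 5000;")  # wait up to 5s on a locked database before erroring
print(conn.execute("PRAGMA foreign_keys;").fetchone()[0])  # 1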
def clean(self) -> None:
"""
Cleans the database by running the VACUUM command, reporting on the freed space.
"""
# No need to clean in-memory database
- if not self._db_path:
+ if not self.db_path:
return
try:
- with self._conn as conn:
- initial_db_size = Path(self._db_path).stat().st_size
- conn.execute("VACUUM;")
- conn.commit()
- final_db_size = Path(self._db_path).stat().st_size
- freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
- if freed_space_in_mb > 0:
- self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
+ initial_db_size = Path(self.db_path).stat().st_size
+ self.conn.execute("VACUUM;")
+ self.conn.commit()
+ final_db_size = Path(self.db_path).stat().st_size
+ freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
+ if freed_space_in_mb > 0:
+ self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
except Exception as e:
- self._logger.error(f"Error cleaning database: {e}")
+ self.logger.error(f"Error cleaning database: {e}")
raise

- @contextmanager
- def transaction(self) -> Generator[sqlite3.Cursor, None, None]:
- """
- Thread-safe context manager for DB work.
- Acquires the RLock, yields a Cursor, then commits or rolls back.
- """
- with self._lock:
- cursor = self._conn.cursor()
- try:
- yield cursor
- self._conn.commit()
- except:
- self._conn.rollback()
- raise
- finally:
- cursor.close()
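The transaction() context manager removed above is what every call site in this diff replaces with an explicit try / commit / except / rollback block. A minimal sketch of the same commit-or-rollback core against a plain sqlite3 connection (the removed implementation also serializes access with an RLock; this keeps only the idea):

# Illustrative sketch, assuming nothing beyond the sqlite3 stdlib module.
import sqlite3
from contextlib import contextmanager

conn = sqlite3.connect(":memory:")

@contextmanager
def transaction(connection: sqlite3.Connection):
    cursor = connection.cursor()
    try:
        yield cursor            # caller runs its cursor.execute(...) calls here
        connection.commit()     # everything succeeded: commit once, in one place
    except Exception:
        connection.rollback()   # any failure undoes the whole unit of work
        raise
    finally:
        cursor.close()

with transaction(conn) as cur:
    cur.execute("CREATE TABLE t (x INTEGER)")
    cur.execute("INSERT INTO t VALUES (1)")
print(conn.execute("SELECT COUNT(*) FROM t").fetchone()[0])  # 1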
|
|||||||
@@ -23,7 +23,6 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import
|
|||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_21 import build_migration_21
|
|
||||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||||
|
|
||||||
|
|
||||||
@@ -64,7 +63,6 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
|||||||
migrator.register_migration(build_migration_18())
|
migrator.register_migration(build_migration_18())
|
||||||
migrator.register_migration(build_migration_19(app_config=config))
|
migrator.register_migration(build_migration_19(app_config=config))
|
||||||
migrator.register_migration(build_migration_20())
|
migrator.register_migration(build_migration_20())
|
||||||
migrator.register_migration(build_migration_21())
|
|
||||||
migrator.run_migrations()
|
migrator.run_migrations()
|
||||||
|
|
||||||
return db
|
return db
|
||||||
|
@@ -1,40 +0,0 @@
- import sqlite3
-
- from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
-
-
- class Migration21Callback:
- def __call__(self, cursor: sqlite3.Cursor) -> None:
- cursor.execute(
- """
- CREATE TABLE client_state (
- id INTEGER PRIMARY KEY CHECK(id = 1),
- data TEXT NOT NULL, -- Frontend will handle the shape of this data
- updated_at DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP)
- );
- """
- )
- cursor.execute(
- """
- CREATE TRIGGER tg_client_state_updated_at
- AFTER UPDATE ON client_state
- FOR EACH ROW
- BEGIN
- UPDATE client_state
- SET updated_at = CURRENT_TIMESTAMP
- WHERE id = OLD.id;
- END;
- """
- )
-
-
- def build_migration_21() -> Migration:
- """Builds the migration object for migrating from version 20 to version 21. This includes:
- - Creating the `client_state` table.
- - Adding a trigger to update the `updated_at` field on updates.
- """
- return Migration(
- from_version=20,
- to_version=21,
- callback=Migration21Callback(),
- )
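The deleted migration above creates a single-row client_state table whose updated_at is refreshed by a trigger on every UPDATE. A sketch of that behaviour, with the DDL copied from the migration and only the probe at the end invented:

# Illustrative sketch; SQLite leaves recursive triggers off by default, so the
# trigger's own UPDATE does not re-fire it.
import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE client_state (
        id INTEGER PRIMARY KEY CHECK(id = 1),
        data TEXT NOT NULL, -- Frontend will handle the shape of this data
        updated_at DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP)
    );
    CREATE TRIGGER tg_client_state_updated_at
    AFTER UPDATE ON client_state
    FOR EACH ROW
    BEGIN
        UPDATE client_state SET updated_at = CURRENT_TIMESTAMP WHERE id = OLD.id;
    END;
    """
)
conn.execute("INSERT INTO client_state (id, data) VALUES (1, '{}')")
before = conn.execute("SELECT updated_at FROM client_state").fetchone()[0]
time.sleep(1.1)  # CURRENT_TIMESTAMP has one-second resolution
conn.execute("UPDATE client_state SET data = 'v2' WHERE id = 1")
after = conn.execute("SELECT updated_at FROM client_state").fetchone()[0]
print(before != after)  # True - the trigger refreshed updated_at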
@@ -32,7 +32,7 @@ class SqliteMigrator:
|
|||||||
|
|
||||||
def __init__(self, db: SqliteDatabase) -> None:
|
def __init__(self, db: SqliteDatabase) -> None:
|
||||||
self._db = db
|
self._db = db
|
||||||
self._logger = db._logger
|
self._logger = db.logger
|
||||||
self._migration_set = MigrationSet()
|
self._migration_set = MigrationSet()
|
||||||
self._backup_path: Optional[Path] = None
|
self._backup_path: Optional[Path] = None
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ class SqliteMigrator:
|
|||||||
"""Migrates the database to the latest version."""
|
"""Migrates the database to the latest version."""
|
||||||
# This throws if there is a problem.
|
# This throws if there is a problem.
|
||||||
self._migration_set.validate_migration_chain()
|
self._migration_set.validate_migration_chain()
|
||||||
cursor = self._db._conn.cursor()
|
cursor = self._db.conn.cursor()
|
||||||
self._create_migrations_table(cursor=cursor)
|
self._create_migrations_table(cursor=cursor)
|
||||||
|
|
||||||
if self._migration_set.count == 0:
|
if self._migration_set.count == 0:
|
||||||
@@ -59,13 +59,13 @@ class SqliteMigrator:
|
|||||||
self._logger.info("Database update needed")
|
self._logger.info("Database update needed")
|
||||||
|
|
||||||
# Make a backup of the db if it needs to be updated and is a file db
|
# Make a backup of the db if it needs to be updated and is a file db
|
||||||
if self._db._db_path is not None:
|
if self._db.db_path is not None:
|
||||||
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
|
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
self._backup_path = self._db._db_path.parent / f"{self._db._db_path.stem}_backup_{timestamp}.db"
|
self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
|
||||||
self._logger.info(f"Backing up database to {str(self._backup_path)}")
|
self._logger.info(f"Backing up database to {str(self._backup_path)}")
|
||||||
# Use SQLite to do the backup
|
# Use SQLite to do the backup
|
||||||
with closing(sqlite3.connect(self._backup_path)) as backup_conn:
|
with closing(sqlite3.connect(self._backup_path)) as backup_conn:
|
||||||
self._db._conn.backup(backup_conn)
|
self._db.conn.backup(backup_conn)
|
||||||
else:
|
else:
|
||||||
self._logger.info("Using in-memory database, no backup needed")
|
self._logger.info("Using in-memory database, no backup needed")
|
||||||
|
|
||||||
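The pre-migration backup above uses sqlite3's online backup API: the source connection is copied page by page into a second database, here via self._db.conn.backup(backup_conn). A small sketch of that call against in-memory databases (table contents are invented):

# Illustrative sketch of sqlite3.Connection.backup().
import sqlite3
from contextlib import closing

src = sqlite3.connect(":memory:")
src.execute("CREATE TABLE version (v INTEGER)")
src.execute("INSERT INTO version VALUES (20)")
src.commit()

with closing(sqlite3.connect(":memory:")) as backup_conn:
    src.backup(backup_conn)  # copy the whole database into the backup connection
    print(backup_conn.execute("SELECT v FROM version").fetchone()[0])  # 20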
@@ -81,7 +81,7 @@ class SqliteMigrator:
|
|||||||
try:
|
try:
|
||||||
# Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
|
# Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
|
||||||
# exception is raised.
|
# exception is raised.
|
||||||
with self._db._conn as conn:
|
with self._db.conn as conn:
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
if self._get_current_version(cursor) != migration.from_version:
|
if self._get_current_version(cursor) != migration.from_version:
|
||||||
raise MigrationError(
|
raise MigrationError(
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ from invokeai.app.util.misc import uuid_string
|
|||||||
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
||||||
def __init__(self, db: SqliteDatabase) -> None:
|
def __init__(self, db: SqliteDatabase) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self._db = db
|
self._conn = db.conn
|
||||||
|
|
||||||
def start(self, invoker: Invoker) -> None:
|
def start(self, invoker: Invoker) -> None:
|
||||||
self._invoker = invoker
|
self._invoker = invoker
|
||||||
@@ -25,23 +25,24 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
|||||||
|
|
||||||
def get(self, style_preset_id: str) -> StylePresetRecordDTO:
|
def get(self, style_preset_id: str) -> StylePresetRecordDTO:
|
||||||
"""Gets a style preset by ID."""
|
"""Gets a style preset by ID."""
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM style_presets
|
FROM style_presets
|
||||||
WHERE id = ?;
|
WHERE id = ?;
|
||||||
""",
|
""",
|
||||||
(style_preset_id,),
|
(style_preset_id,),
|
||||||
)
|
)
|
||||||
row = cursor.fetchone()
|
row = cursor.fetchone()
|
||||||
if row is None:
|
if row is None:
|
||||||
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
|
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
|
||||||
return StylePresetRecordDTO.from_dict(dict(row))
|
return StylePresetRecordDTO.from_dict(dict(row))
|
||||||
|
|
||||||
def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
|
def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
|
||||||
style_preset_id = uuid_string()
|
style_preset_id = uuid_string()
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
INSERT OR IGNORE INTO style_presets (
|
INSERT OR IGNORE INTO style_presets (
|
||||||
@@ -59,11 +60,16 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
|||||||
style_preset.type,
|
style_preset.type,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return self.get(style_preset_id)
|
return self.get(style_preset_id)
|
||||||
|
|
||||||
def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
|
def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
|
||||||
style_preset_ids = []
|
style_preset_ids = []
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
for style_preset in style_presets:
|
for style_preset in style_presets:
|
||||||
style_preset_id = uuid_string()
|
style_preset_id = uuid_string()
|
||||||
style_preset_ids.append(style_preset_id)
|
style_preset_ids.append(style_preset_id)
|
||||||
@@ -84,11 +90,16 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
|||||||
style_preset.type,
|
style_preset.type,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
|
def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
# Change the name of a style preset
|
# Change the name of a style preset
|
||||||
if changes.name is not None:
|
if changes.name is not None:
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
@@ -111,10 +122,15 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
|||||||
(changes.preset_data.model_dump_json(), style_preset_id),
|
(changes.preset_data.model_dump_json(), style_preset_id),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return self.get(style_preset_id)
|
return self.get(style_preset_id)
|
||||||
|
|
||||||
def delete(self, style_preset_id: str) -> None:
|
def delete(self, style_preset_id: str) -> None:
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
DELETE from style_presets
|
DELETE from style_presets
|
||||||
@@ -122,41 +138,51 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
|
|||||||
""",
|
""",
|
||||||
(style_preset_id,),
|
(style_preset_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
|
def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
|
||||||
with self._db.transaction() as cursor:
|
main_query = """
|
||||||
main_query = """
|
SELECT
|
||||||
SELECT
|
*
|
||||||
*
|
FROM style_presets
|
||||||
FROM style_presets
|
"""
|
||||||
"""
|
|
||||||
|
|
||||||
if type is not None:
|
if type is not None:
|
||||||
main_query += "WHERE type = ? "
|
main_query += "WHERE type = ? "
|
||||||
|
|
||||||
main_query += "ORDER BY LOWER(name) ASC"
|
main_query += "ORDER BY LOWER(name) ASC"
|
||||||
|
|
||||||
if type is not None:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(main_query, (type,))
|
if type is not None:
|
||||||
else:
|
cursor.execute(main_query, (type,))
|
||||||
cursor.execute(main_query)
|
else:
|
||||||
|
cursor.execute(main_query)
|
||||||
|
|
||||||
rows = cursor.fetchall()
|
rows = cursor.fetchall()
|
||||||
style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
|
style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
|
||||||
|
|
||||||
return style_presets
|
return style_presets
|
||||||
|
|
||||||
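get_many above only adds the WHERE clause and its bound parameter when a type filter is supplied, which is why the execute call is branched. A sketch of that optional-filter shape with illustrative names (the real code binds a PresetType value):

def build_get_many_query(preset_type: str | None) -> tuple[str, tuple[str, ...]]:
    query = "SELECT * FROM style_presets "
    params: tuple[str, ...] = ()
    if preset_type is not None:
        query += "WHERE type = ? "
        params = (preset_type,)
    query += "ORDER BY LOWER(name) ASC"
    return query, params

print(build_get_many_query(None)[0])     # no WHERE clause, no parameters
print(build_get_many_query("user"))      # WHERE type = ? with ('user',) bound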
def _sync_default_style_presets(self) -> None:
|
def _sync_default_style_presets(self) -> None:
|
||||||
"""Syncs default style presets to the database. Internal use only."""
|
"""Syncs default style presets to the database. Internal use only."""
|
||||||
with self._db.transaction() as cursor:
|
|
||||||
# First delete all existing default style presets
|
# First delete all existing default style presets
|
||||||
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
DELETE FROM style_presets
|
DELETE FROM style_presets
|
||||||
WHERE type = "default";
|
WHERE type = "default";
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
# Next, parse and create the default style presets
|
# Next, parse and create the default style presets
|
||||||
with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
|
with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
|
||||||
presets = json.load(file)
|
presets = json.load(file)
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%f"
|
|||||||
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||||
def __init__(self, db: SqliteDatabase) -> None:
|
def __init__(self, db: SqliteDatabase) -> None:
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self._db = db
|
self._conn = db.conn
|
||||||
|
|
||||||
def start(self, invoker: Invoker) -> None:
|
def start(self, invoker: Invoker) -> None:
|
||||||
self._invoker = invoker
|
self._invoker = invoker
|
||||||
@@ -33,16 +33,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
|
|
||||||
def get(self, workflow_id: str) -> WorkflowRecordDTO:
|
def get(self, workflow_id: str) -> WorkflowRecordDTO:
|
||||||
"""Gets a workflow by ID. Updates the opened_at column."""
|
"""Gets a workflow by ID. Updates the opened_at column."""
|
||||||
with self._db.transaction() as cursor:
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
|
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
|
||||||
FROM workflow_library
|
FROM workflow_library
|
||||||
WHERE workflow_id = ?;
|
WHERE workflow_id = ?;
|
||||||
""",
|
""",
|
||||||
(workflow_id,),
|
(workflow_id,),
|
||||||
)
|
)
|
||||||
row = cursor.fetchone()
|
row = cursor.fetchone()
|
||||||
if row is None:
|
if row is None:
|
||||||
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
|
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
|
||||||
return WorkflowRecordDTO.from_dict(dict(row))
|
return WorkflowRecordDTO.from_dict(dict(row))
|
||||||
@@ -51,8 +51,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
if workflow.meta.category is WorkflowCategory.Default:
|
if workflow.meta.category is WorkflowCategory.Default:
|
||||||
raise ValueError("Default workflows cannot be created via this method")
|
raise ValueError("Default workflows cannot be created via this method")
|
||||||
|
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
|
workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
INSERT OR IGNORE INTO workflow_library (
|
INSERT OR IGNORE INTO workflow_library (
|
||||||
@@ -63,13 +64,18 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
""",
|
""",
|
||||||
(workflow_with_id.id, workflow_with_id.model_dump_json()),
|
(workflow_with_id.id, workflow_with_id.model_dump_json()),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return self.get(workflow_with_id.id)
|
return self.get(workflow_with_id.id)
|
||||||
|
|
||||||
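create() above follows a write-then-read-back shape: INSERT OR IGNORE the serialized workflow, commit (rolling back on error), then fetch the row so the caller gets the stored record. A trimmed-down sketch of that pattern, assuming only the workflow_id and workflow columns and substituting uuid4 for the project's uuid_string helper:

# Illustrative sketch; the real table and models carry more fields.
import json
import sqlite3
from uuid import uuid4

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_library (workflow_id TEXT PRIMARY KEY, workflow TEXT)")

def create(workflow: dict) -> dict:
    workflow_id = uuid4().hex
    try:
        cursor = conn.cursor()
        cursor.execute(
            "INSERT OR IGNORE INTO workflow_library (workflow_id, workflow) VALUES (?, ?)",
            (workflow_id, json.dumps({**workflow, "id": workflow_id})),
        )
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    row = conn.execute(
        "SELECT workflow FROM workflow_library WHERE workflow_id = ?", (workflow_id,)
    ).fetchone()
    return json.loads(row[0])

print(create({"name": "my workflow"})["name"])  # my workflow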
def update(self, workflow: Workflow) -> WorkflowRecordDTO:
|
def update(self, workflow: Workflow) -> WorkflowRecordDTO:
|
||||||
if workflow.meta.category is WorkflowCategory.Default:
|
if workflow.meta.category is WorkflowCategory.Default:
|
||||||
raise ValueError("Default workflows cannot be updated")
|
raise ValueError("Default workflows cannot be updated")
|
||||||
|
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
UPDATE workflow_library
|
UPDATE workflow_library
|
||||||
@@ -78,13 +84,18 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
""",
|
""",
|
||||||
(workflow.model_dump_json(), workflow.id),
|
(workflow.model_dump_json(), workflow.id),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return self.get(workflow.id)
|
return self.get(workflow.id)
|
||||||
|
|
||||||
def delete(self, workflow_id: str) -> None:
|
def delete(self, workflow_id: str) -> None:
|
||||||
if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
|
if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
|
||||||
raise ValueError("Default workflows cannot be deleted")
|
raise ValueError("Default workflows cannot be deleted")
|
||||||
|
|
||||||
with self._db.transaction() as cursor:
|
try:
|
||||||
|
cursor = self._conn.cursor()
|
||||||
cursor.execute(
|
cursor.execute(
|
||||||
"""--sql
|
"""--sql
|
||||||
DELETE from workflow_library
|
DELETE from workflow_library
|
||||||
@@ -92,6 +103,10 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
""",
|
""",
|
||||||
(workflow_id,),
|
(workflow_id,),
|
||||||
)
|
)
|
||||||
|
self._conn.commit()
|
||||||
|
except Exception:
|
||||||
|
self._conn.rollback()
|
||||||
|
raise
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_many(
|
def get_many(
|
||||||
@@ -106,108 +121,108 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
has_been_opened: Optional[bool] = None,
is_published: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
-with self._db.transaction() as cursor:
# sanitize!
assert order_by in WorkflowRecordOrderBy
assert direction in SQLiteDirection

# We will construct the query dynamically based on the query params

# The main query to get the workflows / counts
main_query = """
SELECT
workflow_id,
category,
name,
description,
created_at,
updated_at,
opened_at,
tags
FROM workflow_library
"""
count_query = "SELECT COUNT(*) FROM workflow_library"

# Start with an empty list of conditions and params
conditions: list[str] = []
params: list[str | int] = []

if categories:
# Categories is a list of WorkflowCategory enum values, and a single string in the DB

# Ensure all categories are valid (is this necessary?)
assert all(c in WorkflowCategory for c in categories)

# Construct a placeholder string for the number of categories
placeholders = ", ".join("?" for _ in categories)

# Construct the condition string & params
category_condition = f"category IN ({placeholders})"
category_params = [category.value for category in categories]

conditions.append(category_condition)
params.extend(category_params)

if tags:
# Tags is a list of strings, and a single string in the DB
# The string in the DB has no guaranteed format

# Construct a list of conditions for each tag
tags_conditions = ["tags LIKE ?" for _ in tags]
tags_conditions_joined = " OR ".join(tags_conditions)
tags_condition = f"({tags_conditions_joined})"

# And the params for the tags, case-insensitive
tags_params = [f"%{t.strip()}%" for t in tags]

conditions.append(tags_condition)
params.extend(tags_params)

if has_been_opened:
conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
conditions.append("opened_at IS NULL")

# Ignore whitespace in the query
stripped_query = query.strip() if query else None
if stripped_query:
# Construct a wildcard query for the name, description, and tags
wildcard_query = "%" + stripped_query + "%"
query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"

conditions.append(query_condition)
params.extend([wildcard_query, wildcard_query, wildcard_query])

if conditions:
# If there are conditions, add a WHERE clause and then join the conditions
main_query += " WHERE "
count_query += " WHERE "

all_conditions = " AND ".join(conditions)
main_query += all_conditions
count_query += all_conditions

# After this point, the query and params differ for the main query and the count query
main_params = params.copy()
count_params = params.copy()

# Main query also gets ORDER BY and LIMIT/OFFSET
main_query += f" ORDER BY {order_by.value} {direction.value}"

if per_page:
main_query += " LIMIT ? OFFSET ?"
main_params.extend([per_page, page * per_page])

# Put a ring on it
main_query += ";"
count_query += ";"

+cursor = self._conn.cursor()
cursor.execute(main_query, main_params)
rows = cursor.fetchall()
workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]

cursor.execute(count_query, count_params)
total = cursor.fetchone()[0]

if per_page:
pages = total // per_page + (total % per_page > 0)
@@ -232,46 +247,46 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
if not tags:
return {}

-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories and selected tags
base_conditions: list[str] = []
base_params: list[str | int] = []

# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])

if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")

# For each tag to count, run a separate query
for tag in tags:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()

# Add this specific tag condition
conditions.append("tags LIKE ?")
params.append(f"%{tag.strip()}%")

# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""

if conditions:
stmt += " WHERE " + " AND ".join(conditions)

cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[tag] = count

return result

@@ -281,51 +296,52 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
has_been_opened: Optional[bool] = None,
is_published: Optional[bool] = None,
) -> dict[str, int]:
-with self._db.transaction() as cursor:
+cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories
base_conditions: list[str] = []
base_params: list[str | int] = []

# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])

if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")

# For each category to count, run a separate query
for category in categories:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()

# Add this specific category condition
conditions.append("category = ?")
params.append(category.value)

# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""

if conditions:
stmt += " WHERE " + " AND ".join(conditions)

cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[category.value] = count

return result

def update_opened_at(self, workflow_id: str) -> None:
-with self._db.transaction() as cursor:
+try:
+cursor = self._conn.cursor()
cursor.execute(
f"""--sql
UPDATE workflow_library
@@ -334,6 +350,10 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(workflow_id,),
)
+self._conn.commit()
+except Exception:
+self._conn.rollback()
+raise

def _sync_default_workflows(self) -> None:
"""Syncs default workflows to the database. Internal use only."""
@@ -348,7 +368,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
meaningless, as they are overwritten every time the server starts.
"""

-with self._db.transaction() as cursor:
+try:
+cursor = self._conn.cursor()
workflows_from_file: list[Workflow] = []
workflows_to_update: list[Workflow] = []
workflows_to_add: list[Workflow] = []
@@ -428,3 +449,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(w.model_dump_json(), w.id),
)

+self._conn.commit()
+except Exception:
+self._conn.rollback()
+raise
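The get_many hunk above assembles its SQL dynamically from optional filters and then paginates. A self-contained sketch of that pattern, shaped like the code in the hunk but with simplified column names and no database access (illustrative only, not the project's implementation):

def build_workflow_queries(
    categories: list[str] | None,
    tags: list[str] | None,
    query: str | None,
    page: int,
    per_page: int | None,
) -> tuple[str, list[str | int], str, list[str | int]]:
    main_query = "SELECT workflow_id, category, name, description, tags FROM workflow_library"
    count_query = "SELECT COUNT(*) FROM workflow_library"
    conditions: list[str] = []
    params: list[str | int] = []

    if categories:
        # One positional placeholder per category.
        placeholders = ", ".join("?" for _ in categories)
        conditions.append(f"category IN ({placeholders})")
        params.extend(categories)

    if tags:
        # Tags live in a single string column, so match each tag with LIKE.
        conditions.append("(" + " OR ".join("tags LIKE ?" for _ in tags) + ")")
        params.extend(f"%{t.strip()}%" for t in tags)

    if query and query.strip():
        wildcard = f"%{query.strip()}%"
        conditions.append("(name LIKE ? OR description LIKE ? OR tags LIKE ?)")
        params.extend([wildcard, wildcard, wildcard])

    if conditions:
        where = " WHERE " + " AND ".join(conditions)
        main_query += where
        count_query += where

    # Only the main query gets ORDER BY and LIMIT/OFFSET; the count query reuses the filters.
    main_params = params.copy()
    count_params = params.copy()
    main_query += " ORDER BY created_at DESC"
    if per_page:
        main_query += " LIMIT ? OFFSET ?"
        main_params.extend([per_page, page * per_page])

    return main_query + ";", main_params, count_query + ";", count_params

For example, build_workflow_queries(["user"], ["portrait"], None, page=1, per_page=10) yields a filtered SELECT with LIMIT 10 OFFSET 10 plus a matching COUNT query bound to the same filter parameters.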
@@ -73,14 +73,14 @@ class KontextExtension:

def __init__(
self,
-kontext_conditioning: list[FluxKontextConditioningField],
+kontext_conditioning: FluxKontextConditioningField,
context: InvocationContext,
vae_field: VAEField,
device: torch.device,
dtype: torch.dtype,
):
"""
-Initializes the KontextExtension, pre-processing the reference images
+Initializes the KontextExtension, pre-processing the reference image
into latents and positional IDs.
"""
self._context = context
@@ -93,68 +93,54 @@ class KontextExtension:
self.kontext_latents, self.kontext_ids = self._prepare_kontext()

def _prepare_kontext(self) -> tuple[torch.Tensor, torch.Tensor]:
-"""Encodes the reference images and prepares their concatenated latents and IDs."""
-all_latents = []
-all_ids = []
-
+"""Encodes the reference image and prepares its latents and IDs."""
+image = self._context.images.get_pil(self.kontext_conditioning.image.image_name)
+
+# Calculate aspect ratio of input image
+width, height = image.size
+aspect_ratio = width / height
+
+# Find the closest preferred resolution by aspect ratio
+_, target_width, target_height = min(
+((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
+)
+
+# Apply BFL's scaling formula
+# This ensures compatibility with the model's training
+scaled_width = 2 * int(target_width / 16)
+scaled_height = 2 * int(target_height / 16)
+
+# Resize to the exact resolution used during training
+image = image.convert("RGB")
+final_width = 8 * scaled_width
+final_height = 8 * scaled_height
+image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
+
+# Convert to tensor with same normalization as BFL
+image_np = np.array(image)
+image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0
+image_tensor = einops.rearrange(image_tensor, "h w c -> 1 c h w")
+image_tensor = image_tensor.to(self._device)
+
+# Continue with VAE encoding
vae_info = self._context.models.load(self._vae_field.vae)
-
-for idx, kontext_field in enumerate(self.kontext_conditioning):
-image = self._context.images.get_pil(kontext_field.image.image_name)
-
-# Calculate aspect ratio of input image
-width, height = image.size
-aspect_ratio = width / height
-
-# Find the closest preferred resolution by aspect ratio
-_, target_width, target_height = min(
-((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
-)
-
-# Apply BFL's scaling formula
-# This ensures compatibility with the model's training
-scaled_width = 2 * int(target_width / 16)
-scaled_height = 2 * int(target_height / 16)
-
-# Resize to the exact resolution used during training
-image = image.convert("RGB")
-final_width = 8 * scaled_width
-final_height = 8 * scaled_height
-image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
-
-# Convert to tensor with same normalization as BFL
-image_np = np.array(image)
-image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0
-image_tensor = einops.rearrange(image_tensor, "h w c -> 1 c h w")
-image_tensor = image_tensor.to(self._device)
-
-# Continue with VAE encoding
-kontext_latents_unpacked = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
-
-# Extract tensor dimensions
-batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
-
-# Pack the latents
-kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
-
-# Generate IDs with offset based on image index
-kontext_ids = generate_img_ids_with_offset(
-latent_height=latent_height,
-latent_width=latent_width,
-batch_size=batch_size,
-device=self._device,
-dtype=self._dtype,
-idx_offset=idx + 1,  # Each image gets a unique offset
-)
-
-all_latents.append(kontext_latents_packed)
-all_ids.append(kontext_ids)
-
-# Concatenate all latents and IDs along the sequence dimension
-concatenated_latents = torch.cat(all_latents, dim=1)  # Concatenate along sequence dimension
-concatenated_ids = torch.cat(all_ids, dim=1)  # Concatenate along sequence dimension
-
-return concatenated_latents, concatenated_ids
+kontext_latents_unpacked = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
+
+# Extract tensor dimensions
+batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
+
+# Pack the latents and generate IDs
+kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
+kontext_ids = generate_img_ids_with_offset(
+latent_height=latent_height,
+latent_width=latent_width,
+batch_size=batch_size,
+device=self._device,
+dtype=self._dtype,
+idx_offset=1,
+)
+
+return kontext_latents_packed, kontext_ids

def ensure_batch_size(self, target_batch_size: int) -> None:
"""Ensures the kontext latents and IDs match the target batch size by repeating if necessary."""
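In the removed (-) variant above, each reference image is encoded and packed separately, and the packed latents and their positional IDs are concatenated along the sequence dimension with a distinct index offset per image. A toy sketch of just that concatenation step; make_ids is a simplified stand-in and is not the project's generate_img_ids_with_offset:

import torch

def make_ids(latent_height: int, latent_width: int, idx_offset: int) -> torch.Tensor:
    # One (offset, y, x) triple per latent position; result shape is [1, h*w, 3].
    ys, xs = torch.meshgrid(torch.arange(latent_height), torch.arange(latent_width), indexing="ij")
    offsets = torch.full_like(ys, idx_offset)
    return torch.stack([offsets, ys, xs], dim=-1).reshape(1, -1, 3).float()

def concat_reference_images(
    packed_latents: list[torch.Tensor],   # each [1, h*w, dim]
    latent_sizes: list[tuple[int, int]],  # (h, w) per image
) -> tuple[torch.Tensor, torch.Tensor]:
    all_latents: list[torch.Tensor] = []
    all_ids: list[torch.Tensor] = []
    for idx, (latents, (h, w)) in enumerate(zip(packed_latents, latent_sizes)):
        all_latents.append(latents)
        all_ids.append(make_ids(h, w, idx_offset=idx + 1))  # each image gets a unique offset
    # Concatenate along the sequence dimension, as in the removed code.
    return torch.cat(all_latents, dim=1), torch.cat(all_ids, dim=1)

latents, ids = concat_reference_images(
    [torch.randn(1, 64, 16), torch.randn(1, 256, 16)], [(8, 8), (16, 16)]
)
# latents.shape == (1, 320, 16); ids.shape == (1, 320, 3)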
@@ -187,7 +187,7 @@ class ModelConfigBase(ABC, BaseModel):
else:
return config_cls.from_model_on_disk(mod, **overrides)

-raise InvalidModelConfigException("Unable to determine model type")
+raise InvalidModelConfigException("No valid config found")

@classmethod
def get_tag(cls) -> Tag:
@@ -1,145 +0,0 @@ (file deleted)
"""Utility functions for extracting metadata from LoRA model files."""

import json
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Set, Tuple

from PIL import Image

from invokeai.app.util.thumbnails import make_thumbnail
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType

logger = logging.getLogger(__name__)


def extract_lora_metadata(
model_path: Path, model_key: str, model_images_path: Path
) -> Tuple[Optional[str], Optional[Set[str]]]:
"""
Extract metadata for a LoRA model from associated JSON and image files.

Args:
model_path: Path to the LoRA model file
model_key: Unique key for the model
model_images_path: Path to the model images directory

Returns:
Tuple of (description, trigger_phrases)
"""
model_stem = model_path.stem
model_dir = model_path.parent

# Find and process preview image
_process_preview_image(model_stem, model_dir, model_key, model_images_path)

# Extract metadata from JSON
description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)

return description, trigger_phrases


def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
"""Find and process a preview image for the model, saving it to the model images store."""
image_extensions = [".png", ".jpg", ".jpeg", ".webp"]

for ext in image_extensions:
image_path = model_dir / f"{model_stem}{ext}"
if image_path.exists():
try:
# Open the image
with Image.open(image_path) as img:
# Create thumbnail and save to model images directory
thumbnail = make_thumbnail(img, 256)
thumbnail_path = model_images_path / f"{model_key}.webp"
thumbnail.save(thumbnail_path, format="webp")

logger.info(f"Processed preview image {image_path.name} for model {model_key}")
return True

except Exception as e:
logger.warning(f"Failed to process preview image {image_path.name}: {e}")
return False

return False


def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
"""Extract metadata from a JSON file with the same name as the model."""
json_path = model_dir / f"{model_stem}.json"

if not json_path.exists():
return None, None

try:
with open(json_path, "r", encoding="utf-8") as f:
metadata = json.load(f)

# Extract description
description = _build_description(metadata)

# Extract trigger phrases
trigger_phrases = _extract_trigger_phrases(metadata)

if description or trigger_phrases:
logger.info(f"Applied metadata from {json_path.name}")

return description, trigger_phrases

except (json.JSONDecodeError, IOError, Exception) as e:
logger.warning(f"Failed to read metadata from {json_path}: {e}")
return None, None


def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
"""Build a description from metadata fields."""
description_parts = []

if description := metadata.get("description"):
description_parts.append(str(description).strip())

if notes := metadata.get("notes"):
description_parts.append(str(notes).strip())

return " | ".join(description_parts) if description_parts else None


def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
"""Extract trigger phrases from metadata."""
if not (activation_text := metadata.get("activation text")):
return None

activation_text = str(activation_text).strip()
if not activation_text:
return None

# Split on commas and clean up each phrase
phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]

return set(phrases) if phrases else None


def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
"""
Apply extracted metadata to a LoRA model configuration.

Args:
info: The model configuration to update
model_path: Path to the LoRA model file
model_images_path: Path to the model images directory
"""
# Only process LoRA models
if info.type != ModelType.LoRA:
return

# Extract and apply metadata
description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)

# We don't set cover_image path in the config anymore since images are stored
# separately in the model images store by model key

if description:
info.description = description

if trigger_phrases:
info.trigger_phrases = trigger_phrases
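The removed module above is self-contained, so its parsing logic is easy to restate in isolation. A small standalone sketch that mirrors _extract_trigger_phrases, for readers who want to try the activation-text parsing without the rest of the model manager (the input values are hypothetical):

from typing import Any, Dict, Optional, Set

def parse_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
    # Mirrors _extract_trigger_phrases above: split "activation text" on commas
    # and keep the non-empty, stripped phrases.
    activation_text = str(metadata.get("activation text") or "").strip()
    if not activation_text:
        return None
    phrases = {p.strip() for p in activation_text.split(",") if p.strip()}
    return phrases or None

print(parse_trigger_phrases({"activation text": "ohwx person, studio photo"}))
# {'ohwx person', 'studio photo'}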
invokeai/frontend/web/.eslintignore (new file, 10 lines)
@@ -0,0 +1,10 @@
dist/
static/
.husky/
node_modules/
patches/
stats.html
index.html
.yarn/
*.scss
src/services/api/schema.ts
invokeai/frontend/web/.eslintrc.js (new file, 88 lines)
@@ -0,0 +1,88 @@
module.exports = {
extends: ['@invoke-ai/eslint-config-react'],
plugins: ['path', 'i18next'],
rules: {
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
'react-refresh/only-export-components': 'off',
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
'@typescript-eslint/consistent-type-assertions': 'off',
// https://github.com/qdanik/eslint-plugin-path
'path/no-relative-imports': ['error', { maxDepth: 0 }],
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
// TODO: ENABLE THIS RULE BEFORE v6.0.0
// 'i18next/no-literal-string': 'error',
// https://eslint.org/docs/latest/rules/no-console
'no-console': 'warn',
// https://eslint.org/docs/latest/rules/no-promise-executor-return
'no-promise-executor-return': 'error',
// https://eslint.org/docs/latest/rules/require-await
'require-await': 'error',
// Restrict setActiveTab calls to only use-navigation-api.tsx
'no-restricted-syntax': [
'error',
{
selector: 'CallExpression[callee.name="setActiveTab"]',
message:
'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
},
],
// TODO: ENABLE THIS RULE BEFORE v6.0.0
'react/display-name': 'off',
'no-restricted-properties': [
'error',
{
object: 'crypto',
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
{
object: 'navigator',
property: 'clipboard',
message:
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],
'no-restricted-imports': [
'error',
{
paths: [
{
name: 'lodash-es',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'lodash-es',
message: 'Please use es-toolkit instead.',
},
{
name: 'es-toolkit',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
],
},
],
},
overrides: [
/**
 * Allow setActiveTab calls only in use-navigation-api.tsx
 */
{
files: ['**/use-navigation-api.tsx'],
rules: {
'no-restricted-syntax': 'off',
},
},
/**
 * Overrides for stories
 */
{
files: ['*.stories.tsx'],
rules: {
// We may not have i18n available in stories.
'i18next/no-literal-string': 'off',
},
},
],
};
invokeai/frontend/web/.gitignore (vendored, 1 line)
@@ -45,4 +45,3 @@ yalc.lock
# vitest
tsconfig.vitest-temp.json
coverage/
-*.tgz

@@ -14,4 +14,3 @@ static/
src/theme/css/overlayscrollbars.css
src/theme_/css/overlayscrollbars.css
pnpm-lock.yaml
-.claude
invokeai/frontend/web/.prettierrc.js (new file, 11 lines)
@@ -0,0 +1,11 @@
module.exports = {
...require('@invoke-ai/prettier-config-react'),
overrides: [
{
files: ['public/locales/*.json'],
options: {
tabWidth: 4,
},
},
],
};
@@ -1,17 +0,0 @@ (file deleted)
{
"$schema": "http://json.schemastore.org/prettierrc",
"trailingComma": "es5",
"printWidth": 120,
"tabWidth": 2,
"semi": true,
"singleQuote": true,
"endOfLine": "auto",
"overrides": [
{
"files": ["public/locales/*.json"],
"options": {
"tabWidth": 4
}
}
]
}
@@ -1,23 +1,21 @@
-import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
-import type { PropsWithChildren } from 'react';
-import { memo, useEffect } from 'react';
-
-import { useAppDispatch } from '../src/app/store/storeHooks';
+import { PropsWithChildren, memo, useEffect } from 'react';
+
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
+import { useAppDispatch } from '../src/app/store/storeHooks';
+import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**
 * Initializes some state for storybook. Must be in a different component
 * so that it is run inside the redux context.
 */
-export const ReduxInit = memo(({ children }: PropsWithChildren) => {
+export const ReduxInit = memo((props: PropsWithChildren) => {
const dispatch = useAppDispatch();
useGlobalModifiersInit();
useEffect(() => {
dispatch(
modelChanged({ model: { key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' } })
);
-}, [dispatch]);
+}, []);

-return children;
+return props.children;
});

ReduxInit.displayName = 'ReduxInit';
@@ -2,13 +2,19 @@ import type { StorybookConfig } from '@storybook/react-vite';

const config: StorybookConfig = {
stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
-addons: ['@storybook/addon-links', '@storybook/addon-docs'],
+addons: [
+'@storybook/addon-links',
+'@storybook/addon-essentials',
+'@storybook/addon-interactions',
+'@storybook/addon-storysource',
+],
framework: {
name: '@storybook/react-vite',
options: {},
},
+docs: {
+autodocs: 'tag',
+},
core: {
disableTelemetry: true,
},
@@ -1,5 +1,5 @@
-import { addons } from 'storybook/manager-api';
-import { themes } from 'storybook/theming';
+import { addons } from '@storybook/manager-api';
+import { themes } from '@storybook/theming';

addons.setConfig({
theme: themes.dark,
@@ -1,18 +1,17 @@
-import type { Preview } from '@storybook/react-vite';
-import { themes } from 'storybook/theming';
-import { $store } from 'app/store/nanostores/store';
+import { Preview } from '@storybook/react';
+import { themes } from '@storybook/theming';
import i18n from 'i18next';
import { initReactI18next } from 'react-i18next';
import { Provider } from 'react-redux';
+import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
+import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
+import { createStore } from '../src/app/store/store';
// TODO: Disabled for IDE performance issues with our translation JSON
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
import translationEN from '../public/locales/en.json';
-import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
-import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
-import { createStore } from '../src/app/store/store';
import { ReduxInit } from './ReduxInit';
+import { $store } from 'app/store/nanostores/store';

i18n.use(initReactI18next).init({
lng: 'en',
@@ -26,7 +25,7 @@ i18n.use(initReactI18next).init({
returnNull: false,
});

-const store = createStore();
+const store = createStore(undefined, false);
$store.set(store);
$baseUrl.set('http://localhost:9090');

@@ -47,7 +46,6 @@ const preview: Preview = {
parameters: {
docs: {
theme: themes.dark,
-codePanel: true,
},
},
};
@@ -1,246 +0,0 @@ (file deleted)
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import typescriptParser from '@typescript-eslint/parser';
import pluginI18Next from 'eslint-plugin-i18next';
import pluginImport from 'eslint-plugin-import';
import pluginPath from 'eslint-plugin-path';
import pluginReact from 'eslint-plugin-react';
import pluginReactHooks from 'eslint-plugin-react-hooks';
import pluginReactRefresh from 'eslint-plugin-react-refresh';
import pluginSimpleImportSort from 'eslint-plugin-simple-import-sort';
import pluginStorybook from 'eslint-plugin-storybook';
import pluginUnusedImports from 'eslint-plugin-unused-imports';
import globals from 'globals';

export default [
js.configs.recommended,

{
languageOptions: {
parser: typescriptParser,
parserOptions: {
ecmaFeatures: {
jsx: true,
},
},
globals: {
...globals.browser,
...globals.node,
GlobalCompositeOperation: 'readonly',
RequestInit: 'readonly',
},
},

files: ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx'],

plugins: {
react: pluginReact,
'@typescript-eslint': typescriptEslint,
'react-hooks': pluginReactHooks,
import: pluginImport,
'unused-imports': pluginUnusedImports,
'simple-import-sort': pluginSimpleImportSort,
'react-refresh': pluginReactRefresh.configs.vite,
path: pluginPath,
i18next: pluginI18Next,
storybook: pluginStorybook,
},

rules: {
...typescriptEslint.configs.recommended.rules,
...pluginReact.configs.recommended.rules,
...pluginReact.configs['jsx-runtime'].rules,
...pluginReactHooks.configs.recommended.rules,
...pluginStorybook.configs.recommended.rules,

'react/jsx-no-bind': [
'error',
{
allowBind: true,
},
],

'react/jsx-curly-brace-presence': [
'error',
{
props: 'never',
children: 'never',
},
],

'react-hooks/exhaustive-deps': 'error',

curly: 'error',
'no-var': 'error',
'brace-style': 'error',
'prefer-template': 'error',
radix: 'error',
'space-before-blocks': 'error',
eqeqeq: 'error',
'one-var': ['error', 'never'],
'no-eval': 'error',
'no-extend-native': 'error',
'no-implied-eval': 'error',
'no-label-var': 'error',
'no-return-assign': 'error',
'no-sequences': 'error',
'no-template-curly-in-string': 'error',
'no-throw-literal': 'error',
'no-unmodified-loop-condition': 'error',
'import/no-duplicates': 'error',
'import/prefer-default-export': 'off',
'unused-imports/no-unused-imports': 'error',

'unused-imports/no-unused-vars': [
'error',
{
vars: 'all',
varsIgnorePattern: '^_',
args: 'after-used',
argsIgnorePattern: '^_',
},
],

'simple-import-sort/imports': 'error',
'simple-import-sort/exports': 'error',
'@typescript-eslint/no-unused-vars': 'off',

'@typescript-eslint/ban-ts-comment': [
'error',
{
'ts-expect-error': 'allow-with-description',
'ts-ignore': true,
'ts-nocheck': true,
'ts-check': false,
minimumDescriptionLength: 10,
},
],

'@typescript-eslint/no-empty-interface': [
'error',
{
allowSingleExtends: true,
},
],

'@typescript-eslint/consistent-type-imports': [
'error',
{
prefer: 'type-imports',
fixStyle: 'separate-type-imports',
disallowTypeAnnotations: true,
},
],

'@typescript-eslint/no-import-type-side-effects': 'error',

'@typescript-eslint/consistent-type-assertions': [
'error',
{
assertionStyle: 'as',
},
],

'path/no-relative-imports': [
'error',
{
maxDepth: 0,
},
],

'no-console': 'warn',
'no-promise-executor-return': 'error',
'require-await': 'error',

'no-restricted-syntax': [
'error',
{
selector: 'CallExpression[callee.name="setActiveTab"]',
message:
'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
},
],

'no-restricted-properties': [
'error',
{
object: 'crypto',
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
{
object: 'navigator',
property: 'clipboard',
message:
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],

// Typescript handles this for us: https://eslint.org/docs/latest/rules/no-redeclare#handled_by_typescript
'no-redeclare': 'off',

'no-restricted-imports': [
'error',
{
paths: [
{
name: 'lodash-es',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'lodash-es',
message: 'Please use es-toolkit instead.',
},
{
name: 'es-toolkit',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'zod/v3',
message: 'Import from zod instead.',
},
],
},
],
},

settings: {
react: {
version: 'detect',
},
},
},

{
files: ['**/use-navigation-api.tsx'],
rules: {
'no-restricted-syntax': 'off',
},
},

{
files: ['**/*.stories.tsx'],
rules: {
'i18next/no-literal-string': 'off',
},
},

{
ignores: [
'**/dist/',
'**/static/',
'**/.husky/',
'**/node_modules/',
'**/patches/',
'**/stats.html',
'**/index.html',
'**/.yarn/',
'**/*.scss',
'src/services/api/schema.ts',
'.prettierrc.js',
'.storybook',
],
},
];
@@ -14,10 +14,8 @@ const config: KnipConfig = {
'src/features/controlLayers/konva/util.ts',
// Will be using this
'src/common/hooks/useAsyncState.ts',
-'src/app/store/use-debounced-app-selector.ts',
],
ignoreBinaries: ['only-allow'],
-ignoreDependencies: ['magic-string'],
paths: {
'public/*': ['public/*'],
},
@@ -47,25 +47,25 @@
"@fontsource-variable/inter": "^5.2.6",
"@invoke-ai/ui-library": "^0.0.46",
"@nanostores/react": "^1.0.0",
-"@observ33r/object-equals": "^1.1.5",
+"@observ33r/object-equals": "^1.1.4",
"@reduxjs/toolkit": "2.8.2",
"@roarr/browser-log-writer": "^1.3.0",
-"@xyflow/react": "^12.8.2",
+"@xyflow/react": "^12.7.1",
-"ag-psd": "^28.2.2",
+"ag-psd": "^28.2.1",
"async-mutex": "^0.5.0",
"chakra-react-select": "^4.9.2",
"cmdk": "^1.1.1",
"compare-versions": "^6.1.1",
-"dockview": "^4.4.1",
+"dockview": "^4.4.0",
-"es-toolkit": "^1.39.7",
+"es-toolkit": "^1.39.5",
"filesize": "^10.1.6",
"fracturedjsonjs": "^4.1.0",
"framer-motion": "^11.10.0",
-"i18next": "^25.3.2",
+"i18next": "^25.2.1",
"i18next-http-backend": "^3.0.2",
-"idb-keyval": "6.2.1",
+"idb-keyval": "^6.2.2",
"jsondiffpatch": "^0.7.3",
-"konva": "^9.3.22",
+"konva": "^9.3.20",
"linkify-react": "^4.3.1",
"linkifyjs": "^4.3.1",
"lru-cache": "^11.1.0",
@@ -83,7 +83,7 @@
"react-dom": "^18.3.1",
"react-dropzone": "^14.3.8",
"react-error-boundary": "^5.0.0",
-"react-hook-form": "^7.60.0",
+"react-hook-form": "^7.58.1",
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^15.5.3",
"react-icons": "^5.5.0",
@@ -103,7 +103,7 @@
"use-debounce": "^10.0.5",
"use-device-pixel-ratio": "^1.1.2",
"uuid": "^11.1.0",
-"zod": "^4.0.10",
+"zod": "^3.25.67",
"zod-validation-error": "^3.5.2"
},
"peerDependencies": {
@@ -111,44 +111,39 @@
"react-dom": "^18.2.0"
},
"devDependencies": {
-"@eslint/js": "^9.31.0",
-"@storybook/addon-docs": "^9.0.17",
-"@storybook/addon-links": "^9.0.17",
-"@storybook/react-vite": "^9.0.17",
+"@invoke-ai/eslint-config-react": "^0.0.14",
+"@invoke-ai/prettier-config-react": "^0.0.7",
+"@storybook/addon-essentials": "^8.6.12",
+"@storybook/addon-interactions": "^8.6.12",
+"@storybook/addon-links": "^8.6.12",
+"@storybook/addon-storysource": "^8.6.12",
+"@storybook/manager-api": "^8.6.12",
+"@storybook/react": "^8.6.12",
+"@storybook/react-vite": "^8.6.12",
+"@storybook/theming": "^8.6.12",
"@types/node": "^22.15.1",
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",
"@types/uuid": "^10.0.0",
-"@typescript-eslint/eslint-plugin": "^8.37.0",
-"@typescript-eslint/parser": "^8.37.0",
"@vitejs/plugin-react-swc": "^3.9.0",
"@vitest/coverage-v8": "^3.1.2",
"@vitest/ui": "^3.1.2",
"concurrently": "^9.1.2",
"csstype": "^3.1.3",
"dpdm": "^3.14.0",
-"eslint": "^9.31.0",
-"eslint-plugin-i18next": "^6.1.2",
-"eslint-plugin-import": "^2.29.1",
-"eslint-plugin-path": "^2.0.3",
-"eslint-plugin-react": "^7.33.2",
-"eslint-plugin-react-hooks": "^5.2.0",
-"eslint-plugin-react-refresh": "^0.4.5",
-"eslint-plugin-simple-import-sort": "^12.0.0",
-"eslint-plugin-storybook": "^9.0.17",
-"eslint-plugin-unused-imports": "^4.1.4",
-"globals": "^16.3.0",
+"eslint": "^8.57.1",
+"eslint-plugin-i18next": "^6.1.1",
+"eslint-plugin-path": "^1.3.0",
"knip": "^5.61.3",
-"magic-string": "^0.30.17",
"openapi-types": "^12.1.3",
"openapi-typescript": "^7.6.1",
"prettier": "^3.5.3",
-"rollup-plugin-visualizer": "^6.0.3",
+"rollup-plugin-visualizer": "^5.14.0",
-"storybook": "^9.0.17",
+"storybook": "^8.6.12",
"tsafe": "^1.8.5",
"type-fest": "^4.40.0",
"typescript": "^5.8.3",
-"vite": "^7.0.5",
+"vite": "^7.0.2",
"vite-plugin-css-injected-by-js": "^3.5.2",
"vite-plugin-dts": "^4.5.3",
"vite-plugin-eslint": "^1.8.1",
invokeai/frontend/web/pnpm-lock.yaml (generated, 2171 lines): file diff suppressed because it is too large.
@@ -711,8 +711,7 @@
"gaussianBlur": "Gaußsche Unschärfe",
"sendToUpscale": "An Hochskalieren senden",
"useCpuNoise": "CPU-Rauschen verwenden",
-"sendToCanvas": "An Leinwand senden",
+"sendToCanvas": "An Leinwand senden"
-"disabledNoRasterContent": "Deaktiviert (kein Rasterinhalt)"
},
"settings": {
"displayInProgress": "Zwischenbilder anzeigen",
@@ -790,10 +789,7 @@
"pasteSuccess": "Eingefügt in {{destination}}",
"pasteFailed": "Einfügen fehlgeschlagen",
"unableToCopy": "Kopieren nicht möglich",
-"unableToCopyDesc_theseSteps": "diese Schritte",
+"unableToCopyDesc_theseSteps": "diese Schritte"
-"noRasterLayers": "Keine Rasterebenen gefunden",
-"noActiveRasterLayers": "Keine aktiven Rasterebenen",
-"noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
},
"accessibility": {
"uploadImage": "Bild hochladen",
@@ -851,10 +847,7 @@
"assetsWithCount_one": "{{count}} in der Sammlung",
"assetsWithCount_other": "{{count}} in der Sammlung",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
-"updateBoardError": "Fehler beim Aktualisieren des Ordners",
+"updateBoardError": "Fehler beim Aktualisieren des Ordners"
-"uncategorizedImages": "Nicht kategorisierte Bilder",
-"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
-"deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
},
"queue": {
"status": "Status",
@@ -1201,9 +1194,6 @@
"Die Kantengröße des Kohärenzdurchlaufs."
],
"heading": "Kantengröße"
-},
-"rasterLayer": {
-"heading": "Rasterebene"
}
},
"invocationCache": {
@@ -1441,10 +1431,7 @@
"autoLayout": "Auto Layout",
"copyShareLink": "Teilen-Link kopieren",
"download": "Herunterladen",
-"convertGraph": "Graph konvertieren",
+"convertGraph": "Graph konvertieren"
-"filterByTags": "Nach Tags filtern",
-"yourWorkflows": "Ihre Arbeitsabläufe",
-"recentlyOpened": "Kürzlich geöffnet"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
@@ -1457,15 +1444,7 @@
"prompt": {
"noMatchingTriggers": "Keine passenden Trigger",
"addPromptTrigger": "Prompt-Trigger hinzufügen",
-"compatibleEmbeddings": "Kompatible Einbettungen",
+"compatibleEmbeddings": "Kompatible Einbettungen"
-"replace": "Ersetzen",
-"insert": "Einfügen",
-"discard": "Verwerfen",
-"generateFromImage": "Prompt aus Bild generieren",
-"expandCurrentPrompt": "Aktuelle Prompt erweitern",
-"uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
-"expandingPrompt": "Prompt wird erweitert...",
-"resultTitle": "Prompt-Erweiterung abgeschlossen"
},
"ui": {
"tabs": {
@@ -1594,30 +1573,30 @@
"newGlobalReferenceImage": "Neues globales Referenzbild",
"newRegionalReferenceImage": "Neues regionales Referenzbild",
"newControlLayer": "Neue Kontroll-Ebene",
-"newRasterLayer": "Neue Rasterebene"
+"newRasterLayer": "Neue Raster-Ebene"
},
"rectangle": "Rechteck",
"saveCanvasToGallery": "Leinwand in Galerie speichern",
-"newRasterLayerError": "Problem beim Erstellen einer Rasterebene",
+"newRasterLayerError": "Problem beim Erstellen einer Raster-Ebene",
"saveLayerToAssets": "Ebene in Galerie speichern",
"deleteReferenceImage": "Referenzbild löschen",
"referenceImage": "Referenzbild",
"opacity": "Opazität",
"removeBookmark": "Lesezeichen entfernen",
-"rasterLayer": "Rasterebene",
+"rasterLayer": "Raster-Ebene",
-"rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
+"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
"controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
"deleteSelected": "Ausgewählte löschen",
"newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
"newControlLayerOk": "Kontroll-Ebene erstellt",
"newControlLayerError": "Problem beim Erstellen einer Kontroll-Ebene",
-"newRasterLayerOk": "Rasterebene erstellt",
+"newRasterLayerOk": "Raster-Layer erstellt",
"moveToFront": "Nach vorne bringen",
"copyToClipboard": "In die Zwischenablage kopieren",
"controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
"clearCaches": "Cache leeren",
"controlLayer": "Kontroll-Ebene",
-"rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
+"rasterLayers_withCount_hidden": "Raster-Ebenen ({{count}} ausgeblendet)",
"transparency": "Transparenz",
"canvas": "Leinwand",
"global": "Global",
@@ -1703,14 +1682,7 @@
"filterType": "Filtertyp",
"filter": "Filter"
},
-"bookmark": "Lesezeichen für Schnell-Umschalten",
+"bookmark": "Lesezeichen für Schnell-Umschalten"
-"asRasterLayer": "Als $t(controlLayers.rasterLayer)",
-"asRasterLayerResize": "Als $t(controlLayers.rasterLayer) (Größe anpassen)",
-"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
-"rasterLayer_withCount_other": "Rasterebenen",
-"newRasterLayer": "Neue $t(controlLayers.rasterLayer)",
-"showNonRasterLayers": "Nicht-Rasterebenen anzeigen (Umschalt+H)",
-"hideNonRasterLayers": "Nicht-Rasterebenen ausblenden (Umschalt+H)"
},
"upsell": {
"shareAccess": "Zugang teilen",
@@ -253,7 +253,6 @@
"cancel": "Cancel",
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
- "cancelAllExceptCurrent": "Cancel All Except Current",
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
"cancelTooltip": "Cancel Current Item",
"cancelSucceeded": "Item Canceled",

@@ -274,7 +273,7 @@
"retryItem": "Retry Item",
"cancelBatchSucceeded": "Batch Canceled",
"cancelBatchFailed": "Problem Canceling Batch",
- "clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled and the Canvas Staging Area will be reset.",
+ "clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
"clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
"current": "Current",
"next": "Next",

@@ -471,11 +470,6 @@
"togglePanels": {
"title": "Toggle Panels",
"desc": "Show or hide both left and right panels at once."
- },
- "selectGenerateTab": {
- "title": "Select the Generate Tab",
- "desc": "Selects the Generate tab.",
- "key": "1"
}
},
"canvas": {

@@ -580,10 +574,6 @@
"title": "Transform",
"desc": "Transform the selected layer."
},
- "invertMask": {
- "title": "Invert Mask",
- "desc": "Invert the selected inpaint mask, creating a new mask with opposite transparency."
- },
"applyFilter": {
"title": "Apply Filter",
"desc": "Apply the pending filter to the selected layer."

@@ -609,20 +599,6 @@
"toggleNonRasterLayers": {
"title": "Toggle Non-Raster Layers",
"desc": "Show or hide all non-raster layer categories (Control Layers, Inpaint Masks, Regional Guidance)."
- },
- "fitBboxToMasks": {
- "title": "Fit Bbox To Masks",
- "desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
- },
- "applySegmentAnything": {
- "title": "Apply Segment Anything",
- "desc": "Apply the current Segment Anything mask.",
- "key": "enter"
- },
- "cancelSegmentAnything": {
- "title": "Cancel Segment Anything",
- "desc": "Cancel the current Segment Anything operation.",
- "key": "esc"
}
},
"workflows": {

@@ -752,10 +728,6 @@
"deleteSelection": {
"title": "Delete",
"desc": "Delete all selected images. By default, you will be prompted to confirm deletion. If the images are currently in use in the app, you will be warned."
- },
- "starImage": {
- "title": "Star/Unstar Image",
- "desc": "Star or unstar the selected image."
}
}
},

@@ -1153,23 +1125,7 @@
"addItem": "Add Item",
"generateValues": "Generate Values",
"floatRangeGenerator": "Float Range Generator",
- "integerRangeGenerator": "Integer Range Generator",
+ "integerRangeGenerator": "Integer Range Generator"
- "layout": {
- "autoLayout": "Auto Layout",
- "layeringStrategy": "Layering Strategy",
- "networkSimplex": "Network Simplex",
- "longestPath": "Longest Path",
- "nodeSpacing": "Node Spacing",
- "layerSpacing": "Layer Spacing",
- "layoutDirection": "Layout Direction",
- "layoutDirectionRight": "Right",
- "layoutDirectionDown": "Down",
- "alignment": "Node Alignment",
- "alignmentUL": "Top Left",
- "alignmentDL": "Bottom Left",
- "alignmentUR": "Top Right",
- "alignmentDR": "Bottom Right"
- }
},
"parameters": {
"aspect": "Aspect",

@@ -1235,7 +1191,7 @@
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
- "fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with FLUX Kontext via BFL API",
+ "fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",

@@ -1451,15 +1407,7 @@
"sentToUpscale": "Sent to Upscale",
"promptGenerationStarted": "Prompt generation started",
"uploadAndPromptGenerationFailed": "Failed to upload image and generate prompt",
- "promptExpansionFailed": "We ran into an issue. Please try prompt expansion again.",
+ "promptExpansionFailed": "We ran into an issue. Please try prompt expansion again."
- "maskInverted": "Mask Inverted",
- "maskInvertFailed": "Failed to Invert Mask",
- "noVisibleMasks": "No Visible Masks",
- "noVisibleMasksDesc": "Create or enable at least one inpaint mask to invert",
- "noInpaintMaskSelected": "No Inpaint Mask Selected",
- "noInpaintMaskSelectedDesc": "Select an inpaint mask to invert",
- "invalidBbox": "Invalid Bounding Box",
- "invalidBboxDesc": "The bounding box has no valid dimensions"
},
"popovers": {
"clipSkip": {

@@ -1827,20 +1775,6 @@
"Structure controls how closely the output image will keep to the layout of the original. Low structure allows major changes, while high structure strictly maintains the original composition and layout."
]
},
- "tileSize": {
- "heading": "Tile Size",
- "paragraphs": [
- "Controls the size of tiles used during the upscaling process. Larger tiles use more memory but may produce better results.",
- "SD1.5 models default to 768, while SDXL models default to 1024. Reduce tile size if you encounter memory issues."
- ]
- },
- "tileOverlap": {
- "heading": "Tile Overlap",
- "paragraphs": [
- "Controls the overlap between adjacent tiles during upscaling. Higher overlap values help reduce visible seams between tiles but use more memory.",
- "The default value of 128 works well for most cases, but you can adjust based on your specific needs and memory constraints."
- ]
- },
"fluxDevLicense": {
"heading": "Non-Commercial License",
"paragraphs": [

@@ -1992,7 +1926,6 @@
"canvas": "Canvas",
"bookmark": "Bookmark for Quick Switch",
"fitBboxToLayers": "Fit Bbox To Layers",
- "fitBboxToMasks": "Fit Bbox To Masks",
"removeBookmark": "Remove Bookmark",
"saveCanvasToGallery": "Save Canvas to Gallery",
"saveBboxToGallery": "Save Bbox to Gallery",

@@ -2029,6 +1962,7 @@
"recalculateRects": "Recalculate Rects",
"clipToBbox": "Clip Strokes to Bbox",
"outputOnlyMaskedRegions": "Output Only Generated Regions",
+ "saveAllImagesToGallery": "Save All Images to Gallery",
"addLayer": "Add Layer",
"duplicate": "Duplicate",
"moveToFront": "Move to Front",

@@ -2057,7 +1991,6 @@
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
- "invertMask": "Invert Mask",
"regionalGuidance": "Regional Guidance",
"referenceImageRegional": "Reference Image (Regional)",
"referenceImageGlobal": "Reference Image (Global)",

@@ -2154,9 +2087,9 @@
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
- "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
+ "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
- "referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
+ "referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
- "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the gallery onto this Reference Image to get started.",
+ "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image to get started.",
"uploadOrDragAnImage": "Drag an image from the gallery or <UploadButton>upload an image</UploadButton>.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",

@@ -2399,8 +2332,7 @@
"alert": "Preserving Masked Region"
},
"saveAllImagesToGallery": {
- "label": "Send New Generations to Gallery",
- "alert": "Sending new generations to Gallery, bypassing Canvas"
+ "alert": "Saving All Images to Gallery"
},
"isolatedStagingPreview": "Isolated Staging Preview",
"isolatedPreview": "Isolated Preview",

@@ -2464,9 +2396,6 @@
"upscaleModel": "Upscale Model",
"postProcessingModel": "Post-Processing Model",
"scale": "Scale",
- "tileControl": "Tile Control",
- "tileSize": "Tile Size",
- "tileOverlap": "Tile Overlap",
"postProcessingMissingModelWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install a post-processing (image to image) model.",
"missingModelsWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install the required models:",
"mainModelDesc": "Main model (SD1.5 or SDXL architecture)",

@@ -2631,8 +2560,9 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
- "Studio state is saved to the server, allowing you to continue your work on any device.",
+ "Generate images faster with new Launchpads and a simplified Generate tab.",
- "Support for multiple reference images for FLUX Kontext (local model only)."
+ "Edit with prompts using Flux Kontext Dev.",
+ "Export to PSD, bulk-hide overlays, organize models & images — all in a reimagined interface built for control."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -2375,8 +2375,65 @@
},
"supportVideos": {
"watch": "Regarder",
+ "videos": {
+ "upscaling": {
+ "description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
+ "title": "Upscaling"
+ },
+ "howDoIGenerateAndSaveToTheGallery": {
+ "description": "Étapes pour générer et enregistrer des images dans la galerie.",
+ "title": "Comment générer et enregistrer dans la galerie ?"
+ },
+ "usingControlLayersAndReferenceGuides": {
+ "title": "Utilisation des couche de contrôle et des guides de référence",
+ "description": "Apprenez à guider la création de vos images avec des couche de contrôle et des images de référence."
+ },
+ "exploringAIModelsAndConceptAdapters": {
+ "description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
+ "title": "Exploration des modèles d'IA et des adaptateurs de concepts"
+ },
+ "howDoIUseControlNetsAndControlLayers": {
+ "title": "Comment utiliser les réseaux de contrôle et les couches de contrôle ?",
+ "description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
+ },
+ "creatingAndComposingOnInvokesControlCanvas": {
+ "description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
+ "title": "Créer et composer sur le canvas de contrôle d'Invoke"
+ },
+ "howDoIEditOnTheCanvas": {
+ "title": "Comment puis-je modifier sur la toile ?",
+ "description": "Guide pour éditer des images directement sur la toile."
+ },
+ "howDoIDoImageToImageTransformation": {
+ "title": "Comment effectuer une transformation d'image à image ?",
+ "description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
+ },
+ "howDoIUseGlobalIPAdaptersAndReferenceImages": {
+ "title": "Comment utiliser les IP Adapters globaux et les images de référence ?",
+ "description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
+ },
+ "howDoIUseInpaintMasks": {
+ "title": "Comment utiliser les masques d'inpainting ?",
+ "description": "Comment appliquer des masques de retourche pour la correction et la variation d'image."
+ },
+ "creatingYourFirstImage": {
+ "title": "Créer votre première image",
+ "description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
+ },
+ "understandingImageToImageAndDenoising": {
+ "title": "Comprendre l'Image-à-Image et le Débruitage",
+ "description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
+ },
+ "howDoIOutpaint": {
+ "title": "Comment effectuer un outpainting ?",
+ "description": "Guide pour l'extension au-delà des bordures de l'image originale."
+ }
+ },
"gettingStarted": "Commencer",
- "supportVideos": "Vidéos d'assistance"
+ "studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
+ "studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
+ "supportVideos": "Vidéos d'assistance",
+ "controlCanvas": "Contrôler la toile"
},
"modelCache": {
"clear": "Effacer le cache du modèle",
@@ -152,7 +152,7 @@
"image": "immagine",
"drop": "Rilascia",
"unstarImage": "Rimuovi contrassegno immagine",
- "dropOrUpload": "Rilascia o carica",
+ "dropOrUpload": "$t(gallery.drop) o carica",
"starImage": "Contrassegna l'immagine",
"dropToUpload": "$t(gallery.drop) per aggiornare",
"bulkDownloadRequested": "Preparazione del download",

@@ -197,8 +197,7 @@
"boardsSettings": "Impostazioni Bacheche",
"imagesSettings": "Impostazioni Immagini Galleria",
"assets": "Risorse",
- "images": "Immagini",
+ "images": "Immagini"
- "useForPromptGeneration": "Usa per generare il prompt"
},
"hotkeys": {
"searchHotkeys": "Cerca tasti di scelta rapida",

@@ -254,16 +253,12 @@
"desc": "Attiva/disattiva il pannello destro."
},
"resetPanelLayout": {
- "title": "Ripristina lo schema del pannello",
+ "title": "Ripristina il layout del pannello",
- "desc": "Ripristina le dimensioni e lo schema predefiniti dei pannelli sinistro e destro."
+ "desc": "Ripristina le dimensioni e il layout predefiniti dei pannelli sinistro e destro."
},
"togglePanels": {
"title": "Attiva/disattiva i pannelli",
"desc": "Mostra o nascondi contemporaneamente i pannelli sinistro e destro."
- },
- "selectGenerateTab": {
- "title": "Seleziona la scheda Genera",
- "desc": "Seleziona la scheda Genera."
}
},
"hotkeys": "Tasti di scelta rapida",

@@ -384,32 +379,6 @@
"applyTransform": {
"title": "Applica trasformazione",
"desc": "Applica la trasformazione in sospeso al livello selezionato."
- },
- "toggleNonRasterLayers": {
- "desc": "Mostra o nascondi tutte le categorie di livelli non raster (Livelli di controllo, Maschere di Inpaint, Guida regionale).",
- "title": "Attiva/disattiva livelli non raster"
- },
- "settings": {
- "behavior": "Comportamento",
- "display": "Mostra",
- "grid": "Griglia"
- },
- "invertMask": {
- "title": "Inverti maschera",
- "desc": "Inverte la maschera di inpaint selezionata, creando una nuova maschera con trasparenza opposta."
- },
- "fitBboxToMasks": {
- "title": "Adatta il riquadro di delimitazione alle maschere",
- "desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo alle maschere di inpaint visibili"
- },
- "applySegmentAnything": {
- "title": "Applica Segment Anything",
- "desc": "Applica la maschera Segment Anything corrente.",
- "key": "invio"
- },
- "cancelSegmentAnything": {
- "title": "Annulla Segment Anything",
- "desc": "Annulla l'operazione Segment Anything corrente."
}
},
"workflows": {

@@ -539,10 +508,6 @@
"galleryNavUpAlt": {
"desc": "Uguale a Naviga verso l'alto, ma seleziona l'immagine da confrontare, aprendo la modalità di confronto se non è già aperta.",
"title": "Naviga verso l'alto (Confronta immagine)"
- },
- "starImage": {
- "desc": "Aggiungi/Rimuovi contrassegno all'immagine selezionata.",
- "title": "Aggiungi / Rimuovi contrassegno immagine"
}
}
},

@@ -658,7 +623,7 @@
"installingXModels_one": "Installazione di {{count}} modello",
"installingXModels_many": "Installazione di {{count}} modelli",
"installingXModels_other": "Installazione di {{count}} modelli",
- "includesNModels": "Include {{n}} modelli e le loro dipendenze.",
+ "includesNModels": "Include {{n}} modelli e le loro dipendenze",
"starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
"noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
"defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",

@@ -691,27 +656,7 @@
"manageModels": "Gestione modelli",
"hfTokenReset": "Ripristino del gettone HF",
"relatedModels": "Modelli correlati",
- "showOnlyRelatedModels": "Correlati",
+ "showOnlyRelatedModels": "Correlati"
- "installedModelsCount": "{{installed}} di {{total}} modelli installati.",
- "allNModelsInstalled": "Tutti i {{count}} modelli installati",
- "nToInstall": "{{count}} da installare",
- "nAlreadyInstalled": "{{count}} già installati",
- "bundleAlreadyInstalled": "Pacchetto già installato",
- "bundleAlreadyInstalledDesc": "Tutti i modelli nel pacchetto {{bundleName}} sono già installati.",
- "launchpad": {
- "description": "Per utilizzare la maggior parte delle funzionalità della piattaforma, Invoke richiede l'installazione di modelli. Scegli tra le opzioni di installazione manuale o esplora i modelli di avvio selezionati.",
- "manualInstall": "Installazione manuale",
- "urlDescription": "Installa i modelli da un URL o da un percorso file locale. Perfetto per modelli specifici che desideri aggiungere.",
- "huggingFaceDescription": "Esplora e installa i modelli direttamente dai repository di HuggingFace.",
- "scanFolderDescription": "Esegui la scansione di una cartella locale per rilevare e installare automaticamente i modelli.",
- "recommendedModels": "Modelli consigliati",
- "exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
- "welcome": "Benvenuti in Gestione Modelli",
- "quickStart": "Pacchetti di avvio rapido",
- "bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
- "browseAll": "Oppure scopri tutti i modelli disponibili:"
- },
- "launchpadTab": "Rampa di lancio"
},
"parameters": {
"images": "Immagini",

@@ -797,10 +742,7 @@
"modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
- "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
+ "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
- "fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con FLUX Kontext tramite BFL API",
- "promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
- "promptExpansionPending": "Espansione del prompt in corso"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",

@@ -942,34 +884,7 @@
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
"workflowUnpublished": "Flusso di lavoro non pubblicato",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
- "imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
+ "imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
- "noRasterLayers": "Nessun livello raster trovato",
- "noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
- "noActiveRasterLayers": "Nessun livello raster attivo",
- "noActiveRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
- "noVisibleRasterLayers": "Nessun livello raster visibile",
- "noVisibleRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
- "invalidCanvasDimensions": "Dimensioni della tela non valide",
- "canvasTooLarge": "Tela troppo grande",
- "canvasTooLargeDesc": "Le dimensioni della tela superano le dimensioni massime consentite per l'esportazione in formato PSD. Riduci la larghezza e l'altezza totali della tela e riprova.",
- "failedToProcessLayers": "Impossibile elaborare i livelli",
- "psdExportSuccess": "Esportazione PSD completata",
- "psdExportSuccessDesc": "Esportazione riuscita di {{count}} livelli nel file PSD",
- "problemExportingPSD": "Problema durante l'esportazione PSD",
- "noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
- "fluxKontextIncompatibleGenerationMode": "FLUX Kontext non supporta la generazione di immagini posizionate sulla tela. Riprova utilizzando la sezione Immagine di riferimento e disattiva tutti i livelli raster.",
- "canvasManagerNotAvailable": "Gestione tela non disponibile",
- "promptExpansionFailed": "Abbiamo riscontrato un problema. Riprova a eseguire l'espansione del prompt.",
- "uploadAndPromptGenerationFailed": "Impossibile caricare l'immagine e generare il prompt",
- "promptGenerationStarted": "Generazione del prompt avviata",
- "invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide",
- "invalidBbox": "Riquadro di delimitazione non valido",
- "noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
- "noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
- "noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
- "noVisibleMasks": "Nessuna maschera visibile",
- "maskInvertFailed": "Impossibile invertire la maschera",
- "maskInverted": "Maschera invertita"
},
"accessibility": {
"invokeProgressBar": "Barra di avanzamento generazione",
@@ -1164,22 +1079,7 @@
"missingField_withName": "Campo \"{{name}}\" mancante",
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
- "missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante",
+ "missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
- "layout": {
- "alignmentDR": "In basso a destra",
- "autoLayout": "Schema automatico",
- "nodeSpacing": "Spaziatura nodi",
- "layerSpacing": "Spaziatura livelli",
- "layeringStrategy": "Strategia livelli",
- "longestPath": "Percorso più lungo",
- "layoutDirection": "Direzione schema",
- "layoutDirectionRight": "Orizzontale",
- "layoutDirectionDown": "Verticale",
- "alignment": "Allineamento nodi",
- "alignmentUL": "In alto a sinistra",
- "alignmentDL": "In basso a sinistra",
- "alignmentUR": "In alto a destra"
- }
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",

@@ -1256,7 +1156,7 @@
"batchQueuedDesc_other": "Aggiunte {{count}} sessioni a {{direction}} della coda",
"graphQueued": "Grafico in coda",
"batch": "Lotto",
- "clearQueueAlertDialog": "La cancellazione della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati e l'area di lavoro della Tela verrà reimpostata.",
+ "clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati.",
"pending": "In attesa",
"completedIn": "Completato in",
"resumeFailed": "Problema nel riavvio dell'elaborazione",

@@ -1312,8 +1212,7 @@
"retrySucceeded": "Elemento rieseguito",
"retryItem": "Riesegui elemento",
"retryFailed": "Problema riesecuzione elemento",
- "credits": "Crediti",
+ "credits": "Crediti"
- "cancelAllExceptCurrent": "Annulla tutto tranne quello corrente"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",

@@ -1326,8 +1225,7 @@
"addLora": "Aggiungi LoRA",
"defaultVAE": "VAE predefinito",
"concepts": "Concetti",
- "lora": "LoRA",
+ "lora": "LoRA"
- "noCompatibleLoRAs": "Nessun LoRA compatibile"
},
"invocationCache": {
"disable": "Disabilita",

@@ -1728,7 +1626,7 @@
"structure": {
"heading": "Struttura",
"paragraphs": [
- "La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
+ "La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Una struttura bassa permette cambiamenti significativi, mentre una struttura alta conserva la composizione e il layout originali."
]
},
"fluxDevLicense": {

@@ -1785,20 +1683,6 @@
"paragraphs": [
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
]
- },
- "tileSize": {
- "heading": "Dimensione riquadro",
- "paragraphs": [
- "Controlla la dimensione dei riquadri utilizzati durante il processo di ampliamento. Riquadri più grandi consumano più memoria, ma possono produrre risultati migliori.",
- "I modelli SD1.5 hanno un valore predefinito di 768, mentre i modelli SDXL hanno un valore predefinito di 1024. Ridurre le dimensioni dei riquadri in caso di problemi di memoria."
- ]
- },
- "tileOverlap": {
- "heading": "Sovrapposizione riquadri",
- "paragraphs": [
- "Controlla la sovrapposizione tra riquadri adiacenti durante l'ampliamento. Valori di sovrapposizione più elevati aiutano a ridurre le giunzioni visibili tra i riquadri, ma consuma più memoria.",
- "Il valore predefinito di 128 è adatto alla maggior parte dei casi, ma è possibile modificarlo in base alle proprie esigenze specifiche e ai limiti di memoria."
- ]
}
},
"sdxl": {

@@ -1846,7 +1730,7 @@
"parameterSet": "Parametro {{parameter}} impostato",
"parsingFailed": "Analisi non riuscita",
"recallParameter": "Richiama {{label}}",
- "canvasV2Metadata": "Livelli Tela",
+ "canvasV2Metadata": "Tela",
"guidance": "Guida",
"seamlessXAxis": "Asse X senza giunte",
"seamlessYAxis": "Asse Y senza giunte",

@@ -1894,7 +1778,7 @@
"opened": "Aperto",
"convertGraph": "Converti grafico",
"loadWorkflow": "$t(common.load) Flusso di lavoro",
- "autoLayout": "Schema automatico",
+ "autoLayout": "Disposizione automatica",
"loadFromGraph": "Carica il flusso di lavoro dal grafico",
"userWorkflows": "Flussi di lavoro utente",
"projectWorkflows": "Flussi di lavoro del progetto",

@@ -2017,16 +1901,7 @@
"prompt": {
"compatibleEmbeddings": "Incorporamenti compatibili",
"addPromptTrigger": "Aggiungi Trigger nel prompt",
- "noMatchingTriggers": "Nessun Trigger corrispondente",
+ "noMatchingTriggers": "Nessun Trigger corrispondente"
- "discard": "Scarta",
- "insert": "Inserisci",
- "replace": "Sostituisci",
- "resultSubtitle": "Scegli come gestire il prompt espanso:",
- "resultTitle": "Espansione del prompt completata",
- "expandingPrompt": "Espansione del prompt...",
- "uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
- "expandCurrentPrompt": "Espandi il prompt corrente",
- "generateFromImage": "Genera prompt dall'immagine"
},
"controlLayers": {
"addLayer": "Aggiungi Livello",

@@ -2337,11 +2212,7 @@
"label": "Preserva la regione mascherata"
},
"isolatedLayerPreview": "Anteprima livello isolato",
- "isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione.",
+ "isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
- "saveAllImagesToGallery": {
- "alert": "Invia le nuove generazioni alla Galleria, bypassando la Tela",
- "label": "Invia le nuove generazioni alla Galleria"
- }
},
"transform": {
"reset": "Reimposta",

@@ -2391,8 +2262,7 @@
"newRegionalGuidance": "Nuova Guida Regionale",
"copyToClipboard": "Copia negli appunti",
"copyCanvasToClipboard": "Copia la tela negli appunti",
- "copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti",
+ "copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
- "newResizedControlLayer": "Nuovo livello di controllo ridimensionato"
},
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",

@@ -2429,10 +2299,10 @@
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
- "controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
+ "controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
- "referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla galleria su questa Immagine di riferimento.",
+ "referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",

@@ -2482,20 +2352,7 @@
"denoiseLimit": "Limite di riduzione del rumore",
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
- "imageNoise": "Rumore dell'immagine",
+ "imageNoise": "Rumore dell'immagine"
- "exportCanvasToPSD": "Esporta la tela in PSD",
- "ruleOfThirds": "Mostra la regola dei terzi",
- "showNonRasterLayers": "Mostra livelli non raster (Shift+H)",
- "hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
- "referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
- "uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
- "autoSwitch": {
- "switchOnStart": "All'inizio",
- "switchOnFinish": "Alla fine",
- "off": "Spento"
- },
- "invertMask": "Inverti maschera",
- "fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere"
},
"ui": {
"tabs": {
@@ -2509,55 +2366,6 @@
|
|||||||
"upscaling": "Amplia",
|
"upscaling": "Amplia",
|
||||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||||
"gallery": "Galleria"
|
"gallery": "Galleria"
|
||||||
},
|
|
||||||
"launchpad": {
|
|
||||||
"workflowsTitle": "Approfondisci i flussi di lavoro.",
|
|
||||||
"upscalingTitle": "Amplia e aggiungi dettagli.",
|
|
||||||
"canvasTitle": "Modifica e perfeziona sulla tela.",
|
|
||||||
"generateTitle": "Genera immagini da prompt testuali.",
|
|
||||||
"modelGuideText": "Vuoi scoprire quali prompt funzionano meglio per ciascun modello?",
|
|
||||||
"modelGuideLink": "Consulta la nostra guida ai modelli.",
|
|
||||||
"workflows": {
|
|
||||||
"description": "I flussi di lavoro sono modelli riutilizzabili che automatizzano le attività di generazione delle immagini, consentendo di eseguire rapidamente operazioni complesse e di ottenere risultati coerenti.",
|
|
||||||
"learnMoreLink": "Scopri di più sulla creazione di flussi di lavoro",
|
|
||||||
"browseTemplates": {
|
|
||||||
"title": "Sfoglia i modelli di flusso di lavoro",
|
|
||||||
"description": "Scegli tra flussi di lavoro predefiniti per le attività comuni"
|
|
||||||
},
|
|
||||||
"createNew": {
|
|
||||||
"title": "Crea un nuovo flusso di lavoro",
|
|
||||||
"description": "Avvia un nuovo flusso di lavoro da zero"
|
|
||||||
},
|
|
||||||
"loadFromFile": {
|
|
||||||
"title": "Carica flusso di lavoro da file",
|
|
||||||
"description": "Carica un flusso di lavoro per iniziare con una configurazione esistente"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"upscaling": {
|
|
||||||
"uploadImage": {
|
|
||||||
"title": "Carica l'immagine da ampliare",
|
|
||||||
"description": "Fai clic o trascina un'immagine per ingrandirla (JPG, PNG, WebP fino a 100 MB)"
|
|
||||||
},
|
|
||||||
"replaceImage": {
|
|
||||||
"title": "Sostituisci l'immagine corrente",
|
|
||||||
"description": "Fai clic o trascina una nuova immagine per sostituire quella corrente"
|
|
||||||
},
|
|
||||||
"imageReady": {
|
|
||||||
"title": "Immagine pronta",
|
|
||||||
"description": "Premere Invoke per iniziare l'ampliamento"
|
|
||||||
},
|
|
||||||
"readyToUpscale": {
|
|
||||||
"title": "Pronto per ampliare!",
|
|
||||||
"description": "Configura le impostazioni qui sotto, quindi fai clic sul pulsante Invoke per iniziare ad ampliare l'immagine."
|
|
||||||
},
|
|
||||||
"upscaleModel": "Modello per l'ampliamento",
|
|
||||||
"model": "Modello",
|
|
||||||
"scale": "Scala",
|
|
||||||
"helpText": {
|
|
||||||
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
|
|
||||||
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"upscaling": {
|
"upscaling": {
|
||||||
@@ -2578,10 +2386,7 @@
|
|||||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
||||||
"upscale": "Amplia",
|
"upscale": "Amplia",
|
||||||
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
||||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento.",
|
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
|
||||||
"tileControl": "Controllo del riquadro",
|
|
||||||
"tileSize": "Dimensione del riquadro",
|
|
||||||
"tileOverlap": "Sovrapposizione riquadro"
|
|
||||||
},
|
},
|
||||||
"upsell": {
|
"upsell": {
|
||||||
"inviteTeammates": "Invita collaboratori",
|
"inviteTeammates": "Invita collaboratori",
|
||||||
@@ -2631,8 +2436,7 @@
|
|||||||
"positivePromptColumn": "'prompt' o 'positive_prompt'",
|
"positivePromptColumn": "'prompt' o 'positive_prompt'",
|
||||||
"noTemplates": "Nessun modello",
|
"noTemplates": "Nessun modello",
|
||||||
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
|
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
|
||||||
"promptTemplateCleared": "Modello di prompt cancellato",
|
"promptTemplateCleared": "Modello di prompt cancellato"
|
||||||
"togglePromptPreviews": "Attiva/disattiva le anteprime dei prompt"
|
|
||||||
},
|
},
|
||||||
"newUserExperience": {
|
"newUserExperience": {
|
||||||
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
|
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
|
||||||
@@ -2648,10 +2452,8 @@
|
|||||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||||
"items": [
|
"items": [
|
||||||
"Nuova impostazione per inviare tutte le generazioni della Tela direttamente alla Galleria.",
|
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||||
"Nuove funzionalità Inverti maschera (Maiusc+V) e Adatta il Riquadro di delimitazione alla maschera (Maiusc+B).",
|
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||||
"Supporto esteso per miniature e configurazioni dei modelli.",
|
|
||||||
"Vari altri aggiornamenti e correzioni per la qualità della vita"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"system": {
|
"system": {
|
||||||
@@ -2683,18 +2485,64 @@
 "supportVideos": {
 "gettingStarted": "Iniziare",
 "supportVideos": "Video di supporto",
-"watch": "Guarda",
-"studioSessionsDesc": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e porre domande. Le sessioni vengono caricate nella playlist la settimana successiva.",
 "videos": {
-"gettingStarted": {
+"usingControlLayersAndReferenceGuides": {
-"title": "Introduzione a Invoke",
+"title": "Utilizzo di livelli di controllo e guide di riferimento",
-"description": "Serie video completa che copre tutto ciò che devi sapere per iniziare a usare Invoke, dalla creazione della tua prima immagine alle tecniche avanzate."
+"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
 },
-"studioSessions": {
+"creatingYourFirstImage": {
-"title": "Sessioni in studio",
+"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
-"description": "Sessioni approfondite che esplorano le funzionalità avanzate di Invoke, i flussi di lavoro creativi e le discussioni della community."
+"title": "Creazione della tua prima immagine"
+},
+"understandingImageToImageAndDenoising": {
+"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
+"title": "Comprendere immagine-a-immagine e riduzione del rumore"
+},
+"howDoIDoImageToImageTransformation": {
+"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
+"title": "Come si esegue la trasformazione da immagine-a-immagine?"
+},
+"howDoIUseInpaintMasks": {
+"title": "Come si usano le maschere Inpaint?",
+"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
+},
+"howDoIOutpaint": {
+"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
+"title": "Come posso eseguire l'outpainting?"
+},
+"exploringAIModelsAndConceptAdapters": {
+"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
+"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
+},
+"upscaling": {
+"title": "Ampliamento",
+"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
+},
+"creatingAndComposingOnInvokesControlCanvas": {
+"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
+"title": "Creare e comporre sulla tela di controllo di Invoke"
+},
+"howDoIGenerateAndSaveToTheGallery": {
+"description": "Passaggi per generare e salvare le immagini nella galleria.",
+"title": "Come posso generare e salvare nella Galleria?"
+},
+"howDoIEditOnTheCanvas": {
+"title": "Come posso apportare modifiche sulla tela?",
+"description": "Guida alla modifica delle immagini direttamente sulla tela."
+},
+"howDoIUseControlNetsAndControlLayers": {
+"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
+"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
+},
+"howDoIUseGlobalIPAdaptersAndReferenceImages": {
+"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
+"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
 }
-}
+},
+"controlCanvas": "Tela di Controllo",
+"watch": "Guarda",
+"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
+"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
 },
 "modelCache": {
 "clear": "Cancella la cache del modello",
@@ -141,7 +141,7 @@
 "loading": "ロード中",
 "currentlyInUse": "この画像は現在下記の機能を使用しています:",
 "drop": "ドロップ",
-"dropOrUpload": "ドロップまたはアップロード",
+"dropOrUpload": "$t(gallery.drop) またはアップロード",
 "deleteImage_other": "画像 {{count}} 枚を削除",
 "deleteImagePermanent": "削除された画像は復元できません。",
 "download": "ダウンロード",
@@ -193,8 +193,7 @@
 "images": "画像",
 "assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
 "imagesTab": "Invoke内で作成および保存された画像。",
-"assets": "アセット",
+"assets": "アセット"
-"useForPromptGeneration": "プロンプト生成に使用する"
 },
 "hotkeys": {
 "searchHotkeys": "ホットキーを検索",
@@ -364,16 +363,6 @@
 "selectRectTool": {
 "title": "矩形ツール",
 "desc": "矩形ツールを選択します。"
-},
-"settings": {
-"behavior": "行動",
-"display": "ディスプレイ",
-"grid": "グリッド",
-"debug": "デバッグ"
-},
-"toggleNonRasterLayers": {
-"title": "非ラスターレイヤーの切り替え",
-"desc": "ラスター以外のレイヤー カテゴリ (コントロール レイヤー、インペイント マスク、地域ガイダンス) を表示または非表示にします。"
 }
 },
 "workflows": {
@@ -641,7 +630,7 @@
 "restoreDefaultSettings": "クリックするとモデルのデフォルト設定が使用されます.",
 "hfTokenSaved": "ハギングフェイストークンを保存しました",
 "imageEncoderModelId": "画像エンコーダーモデルID",
-"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます。",
+"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
 "learnMoreAboutSupportedModels": "私たちのサポートしているモデルについて更に学ぶ",
 "modelImageUpdateFailed": "モデル画像アップデート失敗",
 "scanFolder": "スキャンフォルダ",
@@ -665,30 +654,7 @@
 "manageModels": "モデル管理",
 "hfTokenReset": "ハギングフェイストークンリセット",
 "relatedModels": "関連のあるモデル",
-"showOnlyRelatedModels": "関連している",
+"showOnlyRelatedModels": "関連している"
-"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
-"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
-"nToInstall": "{{count}}個をインストールする",
-"nAlreadyInstalled": "{{count}} 個すでにインストールされています",
-"bundleAlreadyInstalled": "バンドルがすでにインストールされています",
-"bundleAlreadyInstalledDesc": "{{bundleName}} バンドル内のすべてのモデルはすでにインストールされています。",
-"launchpadTab": "ランチパッド",
-"launchpad": {
-"welcome": "モデルマネジメントへようこそ",
-"description": "Invoke プラットフォームのほとんどの機能を利用するには、モデルのインストールが必要です。手動インストールオプションから選択するか、厳選されたスターターモデルをご覧ください。",
-"manualInstall": "マニュアルインストール",
-"urlDescription": "URLまたはローカルファイルパスからモデルをインストールします。特定のモデルを追加したい場合に最適です。",
-"huggingFaceDescription": "HuggingFace リポジトリからモデルを直接参照してインストールします。",
-"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
-"recommendedModels": "推奨モデル",
-"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
-"quickStart": "クイックスタートバンドル",
-"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
-"browseAll": "または、利用可能なすべてのモデルを参照してください。",
-"stableDiffusion15": "Stable Diffusion1.5",
-"sdxl": "SDXL",
-"fluxDev": "FLUX.1 dev"
-}
 },
 "parameters": {
 "images": "画像",
@@ -754,10 +720,7 @@
 "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
 "noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
 "noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
-"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
+"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
-"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
-"promptExpansionPending": "プロンプト拡張が進行中",
-"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
 },
 "aspect": "縦横比",
 "lockAspectRatio": "縦横比を固定",
@@ -912,26 +875,7 @@
|
|||||||
"imageNotLoadedDesc": "画像を見つけられません",
|
"imageNotLoadedDesc": "画像を見つけられません",
|
||||||
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
||||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
||||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
|
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
|
||||||
"noRasterLayers": "ラスターレイヤーが見つかりません",
|
|
||||||
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
|
|
||||||
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
|
|
||||||
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
|
|
||||||
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
|
|
||||||
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
|
|
||||||
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
|
|
||||||
"canvasTooLarge": "キャンバスが大きすぎます",
|
|
||||||
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
|
|
||||||
"failedToProcessLayers": "レイヤーの処理に失敗しました",
|
|
||||||
"psdExportSuccess": "PSDエクスポート完了",
|
|
||||||
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
|
|
||||||
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
|
|
||||||
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
|
|
||||||
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
|
|
||||||
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
|
|
||||||
"promptGenerationStarted": "プロンプト生成が開始されました",
|
|
||||||
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
|
|
||||||
"promptExpansionFailed": "プロンプト拡張に失敗しました"
|
|
||||||
},
|
},
|
||||||
"accessibility": {
|
"accessibility": {
|
||||||
"invokeProgressBar": "進捗バー",
|
"invokeProgressBar": "進捗バー",
|
||||||
@@ -1070,8 +1014,7 @@
 "lora": "LoRA",
 "defaultVAE": "デフォルトVAE",
 "noLoRAsInstalled": "インストールされているLoRAはありません",
-"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
+"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません"
-"noCompatibleLoRAs": "互換性のあるLoRAはありません"
 },
 "nodes": {
 "addNode": "ノードを追加",
@@ -1765,16 +1708,7 @@
|
|||||||
"prompt": {
|
"prompt": {
|
||||||
"addPromptTrigger": "プロンプトトリガーを追加",
|
"addPromptTrigger": "プロンプトトリガーを追加",
|
||||||
"compatibleEmbeddings": "互換性のある埋め込み",
|
"compatibleEmbeddings": "互換性のある埋め込み",
|
||||||
"noMatchingTriggers": "一致するトリガーがありません",
|
"noMatchingTriggers": "一致するトリガーがありません"
|
||||||
"generateFromImage": "画像からプロンプトを生成する",
|
|
||||||
"expandCurrentPrompt": "現在のプロンプトを展開",
|
|
||||||
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
|
|
||||||
"expandingPrompt": "プロンプトを展開しています...",
|
|
||||||
"resultTitle": "プロンプト拡張完了",
|
|
||||||
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
|
|
||||||
"replace": "交換する",
|
|
||||||
"insert": "挿入する",
|
|
||||||
"discard": "破棄する"
|
|
||||||
},
|
},
|
||||||
"ui": {
|
"ui": {
|
||||||
"tabs": {
|
"tabs": {
|
||||||
@@ -1782,61 +1716,7 @@
|
|||||||
"canvas": "キャンバス",
|
"canvas": "キャンバス",
|
||||||
"workflows": "ワークフロー",
|
"workflows": "ワークフロー",
|
||||||
"models": "モデル",
|
"models": "モデル",
|
||||||
"gallery": "ギャラリー",
|
"gallery": "ギャラリー"
|
||||||
"generation": "生成",
|
|
||||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
|
||||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
|
||||||
"upscaling": "アップスケーリング",
|
|
||||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
|
|
||||||
},
|
|
||||||
"launchpad": {
|
|
||||||
"upscaling": {
|
|
||||||
"model": "モデル",
|
|
||||||
"scale": "スケール",
|
|
||||||
"helpText": {
|
|
||||||
"promptAdvice": "アップスケールする際は、媒体とスタイルを説明するプロンプトを使用してください。画像内の具体的なコンテンツの詳細を説明することは避けてください。",
|
|
||||||
"styleAdvice": "アップスケーリングは、画像の全体的なスタイルに最適です。"
|
|
||||||
},
|
|
||||||
"uploadImage": {
|
|
||||||
"title": "アップスケール用の画像をアップロードする",
|
|
||||||
"description": "アップスケールするには、画像をクリックまたはドラッグします(JPG、PNG、WebP、最大100MB)"
|
|
||||||
},
|
|
||||||
"replaceImage": {
|
|
||||||
"title": "現在の画像を置き換える",
|
|
||||||
"description": "新しい画像をクリックまたはドラッグして、現在の画像を置き換えます"
|
|
||||||
},
|
|
||||||
"imageReady": {
|
|
||||||
"title": "画像準備完了",
|
|
||||||
"description": "アップスケールを開始するにはInvokeを押してください"
|
|
||||||
},
|
|
||||||
"readyToUpscale": {
|
|
||||||
"title": "アップスケールの準備ができました!",
|
|
||||||
"description": "以下の設定を構成し、「Invoke」ボタンをクリックして画像のアップスケールを開始します。"
|
|
||||||
},
|
|
||||||
"upscaleModel": "アップスケールモデル"
|
|
||||||
},
|
|
||||||
"workflowsTitle": "ワークフローを詳しく見てみましょう。",
|
|
||||||
"upscalingTitle": "アップスケールして詳細を追加します。",
|
|
||||||
"canvasTitle": "キャンバス上で編集および調整します。",
|
|
||||||
"generateTitle": "テキストプロンプトから画像を生成します。",
|
|
||||||
"modelGuideText": "各モデルに最適なプロンプトを知りたいですか?",
|
|
||||||
"modelGuideLink": "モデルガイドをご覧ください。",
|
|
||||||
"workflows": {
|
|
||||||
"description": "ワークフローは、画像生成タスクを自動化する再利用可能なテンプレートであり、複雑な操作を迅速に実行して一貫した結果を得ることができます。",
|
|
||||||
"learnMoreLink": "ワークフローの作成について詳しく見る",
|
|
||||||
"browseTemplates": {
|
|
||||||
"title": "ワークフローテンプレートを参照する",
|
|
||||||
"description": "一般的なタスク用にあらかじめ構築されたワークフローから選択する"
|
|
||||||
},
|
|
||||||
"createNew": {
|
|
||||||
"title": "新規ワークフローを作成する",
|
|
||||||
"description": "新しいワークフローをゼロから始める"
|
|
||||||
},
|
|
||||||
"loadFromFile": {
|
|
||||||
"title": "ファイルからワークフローを読み込む",
|
|
||||||
"description": "既存の設定から開始するためのワークフローをアップロードする"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"controlLayers": {
|
"controlLayers": {
|
||||||
@@ -1852,16 +1732,7 @@
|
|||||||
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
||||||
"newGlobalReferenceImage": "新規全域参照画像",
|
"newGlobalReferenceImage": "新規全域参照画像",
|
||||||
"newRegionalReferenceImage": "新規領域参照画像",
|
"newRegionalReferenceImage": "新規領域参照画像",
|
||||||
"canvasGroup": "キャンバス",
|
"canvasGroup": "キャンバス"
|
||||||
"saveToGalleryGroup": "ギャラリーに保存",
|
|
||||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
|
||||||
"saveBboxToGallery": "Bボックスをギャラリーに保存",
|
|
||||||
"newControlLayer": "新規コントロールレイヤー",
|
|
||||||
"newRasterLayer": "新規ラスターレイヤー",
|
|
||||||
"newInpaintMask": "新規インペイントマスク",
|
|
||||||
"copyToClipboard": "クリップボードにコピー",
|
|
||||||
"copyCanvasToClipboard": "キャンバスをクリップボードにコピー",
|
|
||||||
"copyBboxToClipboard": "Bボックスをクリップボードにコピー"
|
|
||||||
},
|
},
|
||||||
"regionalGuidance": "領域ガイダンス",
|
"regionalGuidance": "領域ガイダンス",
|
||||||
"globalReferenceImage": "全域参照画像",
|
"globalReferenceImage": "全域参照画像",
|
||||||
@@ -1872,11 +1743,7 @@
 "transform": "変形",
 "apply": "適用",
 "cancel": "キャンセル",
-"reset": "リセット",
+"reset": "リセット"
-"fitMode": "フィットモード",
-"fitModeContain": "含む",
-"fitModeCover": "カバー",
-"fitModeFill": "満たす"
 },
 "cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
 "convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
@@ -1887,8 +1754,7 @@
 "rectangle": "矩形",
 "move": "移動",
 "eraser": "消しゴム",
-"bbox": "Bbox",
+"bbox": "Bbox"
-"view": "ビュー"
 },
 "saveCanvasToGallery": "キャンバスをギャラリーに保存",
 "saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -1908,386 +1774,25 @@
|
|||||||
"removeBookmark": "ブックマークを外す",
|
"removeBookmark": "ブックマークを外す",
|
||||||
"savedToGalleryOk": "ギャラリーに保存しました",
|
"savedToGalleryOk": "ギャラリーに保存しました",
|
||||||
"controlMode": {
|
"controlMode": {
|
||||||
"prompt": "プロンプト",
|
"prompt": "プロンプト"
|
||||||
"controlMode": "コントロールモード",
|
|
||||||
"balanced": "バランス(推奨)",
|
|
||||||
"control": "コントロール",
|
|
||||||
"megaControl": "メガコントロール"
|
|
||||||
},
|
},
|
||||||
"prompt": "プロンプト",
|
"prompt": "プロンプト",
|
||||||
"settings": {
|
"settings": {
|
||||||
"snapToGrid": {
|
"snapToGrid": {
|
||||||
"off": "オフ",
|
"off": "オフ",
|
||||||
"on": "オン",
|
"on": "オン"
|
||||||
"label": "グリッドにスナップ"
|
}
|
||||||
},
|
|
||||||
"preserveMask": {
|
|
||||||
"label": "マスクされた領域を保持",
|
|
||||||
"alert": "マスクされた領域の保存"
|
|
||||||
},
|
|
||||||
"isolatedStagingPreview": "分離されたステージングプレビュー",
|
|
||||||
"isolatedPreview": "分離されたプレビュー",
|
|
||||||
"isolatedLayerPreview": "分離されたレイヤーのプレビュー",
|
|
||||||
"isolatedLayerPreviewDesc": "フィルタリングや変換などの操作を実行するときに、このレイヤーのみを表示するかどうか。",
|
|
||||||
"invertBrushSizeScrollDirection": "ブラシサイズのスクロール反転",
|
|
||||||
"pressureSensitivity": "圧力感度"
|
|
||||||
},
|
},
|
||||||
"filter": {
|
"filter": {
|
||||||
"filter": "フィルター",
|
"filter": "フィルター",
|
||||||
"spandrel_filter": {
|
"spandrel_filter": {
|
||||||
"model": "モデル",
|
"model": "モデル"
|
||||||
"label": "img2imgモデル",
|
|
||||||
"description": "選択したレイヤーでimg2imgモデルを実行します。",
|
|
||||||
"autoScale": "オートスケール",
|
|
||||||
"autoScaleDesc": "選択したモデルは、目標スケールに達するまで実行されます。",
|
|
||||||
"scale": "ターゲットスケール"
|
|
||||||
},
|
},
|
||||||
"apply": "適用",
|
"apply": "適用",
|
||||||
"reset": "リセット",
|
"reset": "リセット",
|
||||||
"cancel": "キャンセル",
|
"cancel": "キャンセル"
|
||||||
"filters": "フィルター",
|
|
||||||
"filterType": "フィルタータイプ",
|
|
||||||
"autoProcess": "オートプロセス",
|
|
||||||
"process": "プロセス",
|
|
||||||
"advanced": "アドバンスド",
|
|
||||||
"processingLayerWith": "{{type}} フィルターを使用した処理レイヤー。",
|
|
||||||
"forMoreControl": "さらに細かく制御するには、以下の「詳細設定」をクリックしてください。",
|
|
||||||
"canny_edge_detection": {
|
|
||||||
"label": "キャニーエッジ検出",
|
|
||||||
"description": "Canny エッジ検出アルゴリズムを使用して、選択したレイヤーからエッジ マップを生成します。",
|
|
||||||
"low_threshold": "低閾値",
|
|
||||||
"high_threshold": "高閾値"
|
|
||||||
},
|
|
||||||
"color_map": {
|
|
||||||
"label": "カラーマップ",
|
|
||||||
"description": "選択したレイヤーからカラーマップを作成します。",
|
|
||||||
"tile_size": "タイルサイズ"
|
|
||||||
},
|
|
||||||
"content_shuffle": {
|
|
||||||
"label": "コンテンツシャッフル",
|
|
||||||
"description": "選択したレイヤーのコンテンツを、「液化」効果と同様にシャッフルします。",
|
|
||||||
"scale_factor": "スケール係数"
|
|
||||||
},
|
|
||||||
"depth_anything_depth_estimation": {
|
|
||||||
"label": "デプスエニシング",
|
|
||||||
"description": "デプスエニシングモデルを使用して、選択したレイヤーから深度マップを生成します。",
|
|
||||||
"model_size": "モデルサイズ",
|
|
||||||
"model_size_small": "スモール",
|
|
||||||
"model_size_small_v2": "スモールv2",
|
|
||||||
"model_size_base": "ベース",
|
|
||||||
"model_size_large": "ラージ"
|
|
||||||
},
|
|
||||||
"dw_openpose_detection": {
|
|
||||||
"label": "DW オープンポーズ検出",
|
|
||||||
"description": "DW Openpose モデルを使用して、選択したレイヤー内の人間のポーズを検出します。",
|
|
||||||
"draw_hands": "手を描く",
|
|
||||||
"draw_face": "顔を描く",
|
|
||||||
"draw_body": "体を描く"
|
|
||||||
},
|
|
||||||
"hed_edge_detection": {
|
|
||||||
"label": "HEDエッジ検出",
|
|
||||||
"description": "HED エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
|
||||||
"scribble": "落書き"
|
|
||||||
},
|
|
||||||
"lineart_anime_edge_detection": {
|
|
||||||
"label": "線画アニメのエッジ検出",
|
|
||||||
"description": "線画アニメエッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。"
|
|
||||||
},
|
|
||||||
"lineart_edge_detection": {
|
|
||||||
"label": "線画エッジ検出",
|
|
||||||
"description": "線画エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
|
||||||
"coarse": "粗い"
|
|
||||||
},
|
|
||||||
"mediapipe_face_detection": {
|
|
||||||
"label": "メディアパイプ顔検出",
|
|
||||||
"description": "メディアパイプ顔検出モデルを使用して、選択したレイヤー内の顔を検出します。",
|
|
||||||
"max_faces": "マックスフェイス",
|
|
||||||
"min_confidence": "最小信頼度"
|
|
||||||
},
|
|
||||||
"mlsd_detection": {
|
|
||||||
"label": "線分検出",
|
|
||||||
"description": "MLSD 線分検出モデルを使用して、選択したレイヤーから線分マップを生成します。",
|
|
||||||
"score_threshold": "スコア閾値",
|
|
||||||
"distance_threshold": "距離閾値"
|
|
||||||
},
|
|
||||||
"normal_map": {
|
|
||||||
"label": "ノーマルマップ",
|
|
||||||
"description": "選択したレイヤーからノーマルマップを生成します。"
|
|
||||||
},
|
|
||||||
"pidi_edge_detection": {
|
|
||||||
"label": "PiDiNetエッジ検出",
|
|
||||||
"description": "PiDiNet エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
|
||||||
"scribble": "落書き",
|
|
||||||
"quantize_edges": "エッジを量子化する"
|
|
||||||
},
|
|
||||||
"img_blur": {
|
|
||||||
"label": "画像をぼかす",
|
|
||||||
"description": "選択したレイヤーをぼかします。",
|
|
||||||
"blur_type": "ぼかしの種類",
|
|
||||||
"blur_radius": "半径",
|
|
||||||
"gaussian_type": "ガウス分布",
|
|
||||||
"box_type": "ボックス"
|
|
||||||
},
|
|
||||||
"img_noise": {
|
|
||||||
"label": "ノイズ画像",
|
|
||||||
"description": "選択したレイヤーにノイズを追加します。",
|
|
||||||
"noise_type": "ノイズの種類",
|
|
||||||
"noise_amount": "総計",
|
|
||||||
"gaussian_type": "ガウス分布",
|
|
||||||
"salt_and_pepper_type": "塩コショウ",
|
|
||||||
"noise_color": "カラーノイズ",
|
|
||||||
"size": "ノイズサイズ"
|
|
||||||
},
|
|
||||||
"adjust_image": {
|
|
||||||
"label": "画像を調整する",
|
|
||||||
"description": "画像の選択したチャンネルを調整します。",
|
|
||||||
"channel": "チャンネル",
|
|
||||||
"value_setting": "バリュー",
|
|
||||||
"scale_values": "スケールバリュー",
|
|
||||||
"red": "赤(RGBA)",
|
|
||||||
"green": "緑(RGBA)",
|
|
||||||
"blue": "青(RGBA)",
|
|
||||||
"alpha": "アルファ(RGBA)",
|
|
||||||
"cyan": "シアン(CMYK)",
|
|
||||||
"magenta": "マゼンタ(CMYK)",
|
|
||||||
"yellow": "黄色(CMYK)",
|
|
||||||
"black": "黒(CMYK)",
|
|
||||||
"hue": "色相(HSV)",
|
|
||||||
"saturation": "彩度(HSV)",
|
|
||||||
"value": "値(HSV)",
|
|
||||||
"luminosity": "明度(LAB)",
|
|
||||||
"a": "A(ラボ)",
|
|
||||||
"b": "B(ラボ)",
|
|
||||||
"y": "Y(YCbCr)",
|
|
||||||
"cb": "Cb(YCbCr)",
|
|
||||||
"cr": "Cr(YCbCr)"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"weight": "重み",
|
"weight": "重み"
|
||||||
"bookmark": "クイックスイッチのブックマーク",
|
|
||||||
"exportCanvasToPSD": "キャンバスをPSDにエクスポート",
|
|
||||||
"savedToGalleryError": "ギャラリーへの保存中にエラーが発生しました",
|
|
||||||
"regionCopiedToClipboard": "{{region}} をクリップボードにコピーしました",
|
|
||||||
"copyRegionError": "{{region}} のコピー中にエラーが発生しました",
|
|
||||||
"newGlobalReferenceImageOk": "作成されたグローバル参照画像",
|
|
||||||
"newGlobalReferenceImageError": "グローバル参照イメージの作成中に問題が発生しました",
|
|
||||||
"newRegionalReferenceImageOk": "地域参照画像の作成",
|
|
||||||
"newRegionalReferenceImageError": "地域参照画像の作成中に問題が発生しました",
|
|
||||||
"newControlLayerOk": "制御レイヤーの作成",
|
|
||||||
"newControlLayerError": "制御層の作成中に問題が発生しました",
|
|
||||||
"newRasterLayerOk": "ラスターレイヤーを作成しました",
|
|
||||||
"newRasterLayerError": "ラスターレイヤーの作成中に問題が発生しました",
|
|
||||||
"pullBboxIntoLayerOk": "Bbox をレイヤーにプル",
|
|
||||||
"pullBboxIntoLayerError": "BBox をレイヤーにプルする際に問題が発生しました",
|
|
||||||
"pullBboxIntoReferenceImageOk": "Bbox が ReferenceImage にプルされました",
|
|
||||||
"pullBboxIntoReferenceImageError": "BBox を ReferenceImage にプルする際に問題が発生しました",
|
|
||||||
"regionIsEmpty": "選択した領域は空です",
|
|
||||||
"mergeVisible": "マージを可視化",
|
|
||||||
"mergeVisibleOk": "マージされたレイヤー",
|
|
||||||
"mergeVisibleError": "レイヤーの結合エラー",
|
|
||||||
"mergingLayers": "レイヤーのマージ",
|
|
||||||
"clearHistory": "履歴をクリア",
|
|
||||||
"bboxOverlay": "Bboxオーバーレイを表示",
|
|
||||||
"ruleOfThirds": "三分割法を表示",
|
|
||||||
"newSession": "新しいセッション",
|
|
||||||
"clearCaches": "キャッシュをクリア",
|
|
||||||
"recalculateRects": "長方形を再計算する",
|
|
||||||
"clipToBbox": "ストロークをBboxにクリップ",
|
|
||||||
"outputOnlyMaskedRegions": "生成された領域のみを出力する",
|
|
||||||
"width": "幅",
|
|
||||||
"autoNegative": "オートネガティブ",
|
|
||||||
"enableAutoNegative": "オートネガティブを有効にする",
|
|
||||||
"disableAutoNegative": "オートネガティブを無効にする",
|
|
||||||
"deletePrompt": "プロンプトを削除",
|
|
||||||
"deleteReferenceImage": "参照画像を削除",
|
|
||||||
"showHUD": "HUDを表示",
|
|
||||||
"maskFill": "マスク塗りつぶし",
|
|
||||||
"addPositivePrompt": "$t(controlLayers.prompt) を追加します",
|
|
||||||
"addNegativePrompt": "$t(controlLayers.negativePrompt)を追加します",
|
|
||||||
"addReferenceImage": "$t(controlLayers.referenceImage)を追加します",
|
|
||||||
"addImageNoise": "$t(controlLayers.imageNoise)を追加します",
|
|
||||||
"addRasterLayer": "$t(controlLayers.rasterLayer)を追加します",
|
|
||||||
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
|
|
||||||
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
|
|
||||||
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
|
|
||||||
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
|
|
||||||
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
|
|
||||||
"controlLayer": "コントロールレイヤー",
|
|
||||||
"inpaintMask": "インペイントマスク",
|
|
||||||
"referenceImageRegional": "参考画像(地域別)",
|
|
||||||
"referenceImageGlobal": "参考画像(グローバル)",
|
|
||||||
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
|
|
||||||
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
|
|
||||||
"asControlLayer": "$t(controlLayers.controlLayer) として",
|
|
||||||
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
|
|
||||||
"referenceImage": "参照画像",
|
|
||||||
"sendingToCanvas": "キャンバスに生成をのせる",
|
|
||||||
"sendingToGallery": "生成をギャラリーに送る",
|
|
||||||
"sendToGallery": "ギャラリーに送る",
|
|
||||||
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
|
|
||||||
"sendToCanvas": "キャンバスに送る",
|
|
||||||
"newLayerFromImage": "画像から新規レイヤー",
|
|
||||||
"newCanvasFromImage": "画像から新規キャンバス",
|
|
||||||
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
|
|
||||||
"copyToClipboard": "クリップボードにコピー",
|
|
||||||
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
|
|
||||||
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
|
|
||||||
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
|
|
||||||
"rasterLayer_withCount_other": "ラスターレイヤー",
|
|
||||||
"controlLayer_withCount_other": "コントロールレイヤー",
|
|
||||||
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
|
|
||||||
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
|
|
||||||
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
|
|
||||||
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
|
|
||||||
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
|
|
||||||
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
|
|
||||||
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}})",
|
|
||||||
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
|
|
||||||
"layer_other": "レイヤー",
|
|
||||||
"layer_withCount_other": "レイヤー ({{count}})",
|
|
||||||
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
|
|
||||||
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
|
|
||||||
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
|
|
||||||
"copyRasterLayerTo": "$t(controlLayers.rasterLayer)をコピーする",
|
|
||||||
"copyControlLayerTo": "$t(controlLayers.controlLayer) をコピーする",
|
|
||||||
"copyRegionalGuidanceTo": "$t(controlLayers.regionalGuidance)をコピーする",
|
|
||||||
"newRasterLayer": "新しい $t(controlLayers.rasterLayer)",
|
|
||||||
"newControlLayer": "新しい $t(controlLayers.controlLayer)",
|
|
||||||
"newInpaintMask": "新しい $t(controlLayers.inpaintMask)",
|
|
||||||
"newRegionalGuidance": "新しい $t(controlLayers.regionalGuidance)",
|
|
||||||
"pasteTo": "貼り付け先",
|
|
||||||
"pasteToAssets": "アセット",
|
|
||||||
"pasteToAssetsDesc": "アセットに貼り付け",
|
|
||||||
"pasteToBbox": "Bボックス",
|
|
||||||
"pasteToBboxDesc": "新しいレイヤー(Bbox内)",
|
|
||||||
"pasteToCanvas": "キャンバス",
|
|
||||||
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
|
|
||||||
"pastedTo": "{{destination}} に貼り付けました",
|
|
||||||
"transparency": "透明性",
|
|
||||||
"enableTransparencyEffect": "透明効果を有効にする",
|
|
||||||
"disableTransparencyEffect": "透明効果を無効にする",
|
|
||||||
"hidingType": "{{type}} を非表示",
|
|
||||||
"showingType": "{{type}}を表示",
|
|
||||||
"showNonRasterLayers": "非ラスターレイヤーを表示 (Shift+H)",
|
|
||||||
"hideNonRasterLayers": "非ラスターレイヤーを非表示にする (Shift+H)",
|
|
||||||
"dynamicGrid": "ダイナミックグリッド",
|
|
||||||
"logDebugInfo": "デバッグ情報をログに記録する",
|
|
||||||
"locked": "ロックされています",
|
|
||||||
"unlocked": "ロック解除",
|
|
||||||
"deleteSelected": "選択項目を削除",
|
|
||||||
"stagingOnCanvas": "ステージング画像",
|
|
||||||
"replaceLayer": "レイヤーの置き換え",
|
|
||||||
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
|
|
||||||
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
|
|
||||||
"showProgressOnCanvas": "キャンバスに進捗状況を表示",
|
|
||||||
"useImage": "画像を使う",
|
|
||||||
"negativePrompt": "ネガティブプロンプト",
|
|
||||||
"beginEndStepPercentShort": "開始/終了 %",
|
|
||||||
"newGallerySession": "新しいギャラリーセッション",
|
|
||||||
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
|
|
||||||
"newCanvasSession": "新規キャンバスセッション",
|
|
||||||
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
|
|
||||||
"resetCanvasLayers": "キャンバスレイヤーをリセット",
|
|
||||||
"resetGenerationSettings": "生成設定をリセット",
|
|
||||||
"replaceCurrent": "現在のものを置き換える",
|
|
||||||
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
|
|
||||||
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
|
|
||||||
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
|
|
||||||
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
|
|
||||||
"imageNoise": "画像ノイズ",
|
|
||||||
"denoiseLimit": "ノイズ除去制限",
|
|
||||||
"warnings": {
|
|
||||||
"problemsFound": "問題が見つかりました",
|
|
||||||
"unsupportedModel": "選択したベースモデルではレイヤーがサポートされていません",
|
|
||||||
"controlAdapterNoModelSelected": "制御レイヤーモデルが選択されていません",
|
|
||||||
"controlAdapterIncompatibleBaseModel": "互換性のない制御レイヤーベースモデル",
|
|
||||||
"controlAdapterNoControl": "コントロールが選択/描画されていません",
|
|
||||||
"ipAdapterNoModelSelected": "参照画像モデルが選択されていません",
|
|
||||||
"ipAdapterIncompatibleBaseModel": "互換性のない参照画像ベースモデル",
|
|
||||||
"ipAdapterNoImageSelected": "参照画像が選択されていません",
|
|
||||||
"rgNoPromptsOrIPAdapters": "テキストプロンプトや参照画像はありません",
|
|
||||||
"rgNegativePromptNotSupported": "選択されたベースモデルでは否定プロンプトはサポートされていません",
|
|
||||||
"rgReferenceImagesNotSupported": "選択されたベースモデルでは地域の参照画像はサポートされていません",
|
|
||||||
"rgAutoNegativeNotSupported": "選択したベースモデルでは自動否定はサポートされていません",
|
|
||||||
"rgNoRegion": "領域が描画されていません",
|
|
||||||
"fluxFillIncompatibleWithControlLoRA": "コントロールLoRAはFLUX Fillと互換性がありません"
|
|
||||||
},
|
|
||||||
"errors": {
|
|
||||||
"unableToFindImage": "画像が見つかりません",
|
|
||||||
"unableToLoadImage": "画像を読み込めません"
|
|
||||||
},
|
|
||||||
"ipAdapterMethod": {
|
|
||||||
"ipAdapterMethod": "モード",
|
|
||||||
"full": "スタイルと構成",
|
|
||||||
"fullDesc": "視覚スタイル (色、テクスチャ) と構成 (レイアウト、構造) を適用します。",
|
|
||||||
"style": "スタイル(シンプル)",
|
|
||||||
"styleDesc": "レイアウトを考慮せずに視覚スタイル(色、テクスチャ)を適用します。以前は「スタイルのみ」と呼ばれていました。",
|
|
||||||
"composition": "構成のみ",
|
|
||||||
"compositionDesc": "参照スタイルを無視してレイアウトと構造を複製します。",
|
|
||||||
"styleStrong": "スタイル(ストロング)",
|
|
||||||
"styleStrongDesc": "構成への影響をわずかに抑えて、強力なビジュアル スタイルを適用します。",
|
|
||||||
"stylePrecise": "スタイル(正確)",
|
|
||||||
"stylePreciseDesc": "被写体の影響を排除し、正確な視覚スタイルを適用します。"
|
|
||||||
},
|
|
||||||
"fluxReduxImageInfluence": {
|
|
||||||
"imageInfluence": "イメージの影響力",
|
|
||||||
"lowest": "最低",
|
|
||||||
"low": "低",
|
|
||||||
"medium": "中",
|
|
||||||
"high": "高",
|
|
||||||
"highest": "最高"
|
|
||||||
},
|
|
||||||
"fill": {
|
|
||||||
"fillColor": "塗りつぶし色",
|
|
||||||
"fillStyle": "塗りつぶしスタイル",
|
|
||||||
"solid": "固体",
|
|
||||||
"grid": "グリッド",
|
|
||||||
"crosshatch": "クロスハッチ",
|
|
||||||
"vertical": "垂直",
|
|
||||||
"horizontal": "水平",
|
|
||||||
"diagonal": "対角線"
|
|
||||||
},
|
|
||||||
"selectObject": {
|
|
||||||
"selectObject": "オブジェクトを選択",
|
|
||||||
"pointType": "ポイントタイプ",
|
|
||||||
"invertSelection": "選択範囲を反転",
|
|
||||||
"include": "含む",
|
|
||||||
"exclude": "除外",
|
|
||||||
"neutral": "ニュートラル",
|
|
||||||
"apply": "適用",
|
|
||||||
"reset": "リセット",
|
|
||||||
"saveAs": "名前を付けて保存",
|
|
||||||
"cancel": "キャンセル",
|
|
||||||
"process": "プロセス",
|
|
||||||
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
|
|
||||||
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
|
|
||||||
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
|
|
||||||
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
|
|
||||||
"dragToMove": "ポイントをドラッグして移動します",
|
|
||||||
"clickToRemove": "ポイントをクリックして削除します"
|
|
||||||
},
|
|
||||||
"HUD": {
|
|
||||||
"bbox": "Bボックス",
|
|
||||||
"scaledBbox": "スケールされたBボックス",
|
|
||||||
"entityStatus": {
|
|
||||||
"isFiltering": "{{title}} はフィルタリング中です",
|
|
||||||
"isTransforming": "{{title}}は変化しています",
|
|
||||||
"isLocked": "{{title}}はロックされています",
|
|
||||||
"isHidden": "{{title}}は非表示になっています",
|
|
||||||
"isDisabled": "{{title}}は無効です",
|
|
||||||
"isEmpty": "{{title}} は空です"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"stagingArea": {
|
|
||||||
"accept": "受け入れる",
|
|
||||||
"discardAll": "すべて破棄",
|
|
||||||
"discard": "破棄する",
|
|
||||||
"previous": "前へ",
|
|
||||||
"next": "次へ",
|
|
||||||
"saveToGallery": "ギャラリーに保存",
|
|
||||||
"showResultsOn": "結果を表示",
|
|
||||||
"showResultsOff": "結果を隠す"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"stylePresets": {
|
"stylePresets": {
|
||||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||||
@@ -2305,56 +1810,13 @@
|
|||||||
"nameColumn": "'name'",
|
"nameColumn": "'name'",
|
||||||
"type": "タイプ",
|
"type": "タイプ",
|
||||||
"private": "プライベート",
|
"private": "プライベート",
|
||||||
"name": "名称",
|
"name": "名称"
|
||||||
"active": "アクティブ",
|
|
||||||
"copyTemplate": "テンプレートをコピー",
|
|
||||||
"deleteImage": "画像を削除",
|
|
||||||
"deleteTemplate": "テンプレートを削除",
|
|
||||||
"deleteTemplate2": "このテンプレートを削除してもよろしいですか? 元に戻すことはできません。",
|
|
||||||
"exportPromptTemplates": "プロンプトテンプレートをエクスポートする(CSV)",
|
|
||||||
"editTemplate": "テンプレートを編集",
|
|
||||||
"exportDownloaded": "エクスポートをダウンロードしました",
|
|
||||||
"exportFailed": "生成とCSVのダウンロードができません",
|
|
||||||
"importTemplates": "プロンプトテンプレートのインポート(CSV/JSON)",
|
|
||||||
"acceptedColumnsKeys": "受け入れられる列/キー:",
|
|
||||||
"positivePromptColumn": "'プロンプト'または'ポジティブプロンプト'",
|
|
||||||
"insertPlaceholder": "プレースホルダーを挿入",
|
|
||||||
"negativePrompt": "ネガティブプロンプト",
|
|
||||||
"noTemplates": "テンプレートがありません",
|
|
||||||
"noMatchingTemplates": "マッチするテンプレートがありません",
|
|
||||||
"promptTemplatesDesc1": "プロンプトテンプレートは、プロンプトボックスに書き込むプロンプトにテキストを追加します。",
|
|
||||||
"promptTemplatesDesc2": "テンプレート内でプロンプトを含める場所を指定するには <Pre>{{placeholder}}</Pre> のプレースホルダーの文字列を使用します。",
|
|
||||||
"promptTemplatesDesc3": "プレースホルダーを省略すると、テンプレートはプロンプトの末尾に追加されます。",
|
|
||||||
"positivePrompt": "ポジティブプロンプト",
|
|
||||||
"shared": "共有",
|
|
||||||
"sharedTemplates": "テンプレートを共有",
|
|
||||||
"templateDeleted": "プロンプトテンプレートを削除しました",
|
|
||||||
"unableToDeleteTemplate": "プロンプトテンプレートを削除できません",
|
|
||||||
"updatePromptTemplate": "プロンプトテンプレートをアップデート",
|
|
||||||
"useForTemplate": "プロンプトテンプレートに使用する",
|
|
||||||
"viewList": "テンプレートリストを表示",
|
|
||||||
"viewModeTooltip": "現在選択されているテンプレートでは、プロンプトはこのようになります。プロンプトを編集するには、テキストボックス内の任意の場所をクリックしてください。",
|
|
||||||
"togglePromptPreviews": "プロンプトプレビューを切り替える"
|
|
||||||
},
|
},
|
||||||
"upscaling": {
|
"upscaling": {
|
||||||
"upscaleModel": "アップスケールモデル",
|
"upscaleModel": "アップスケールモデル",
|
||||||
"postProcessingModel": "ポストプロセスモデル",
|
"postProcessingModel": "ポストプロセスモデル",
|
||||||
"upscale": "アップスケール",
|
"upscale": "アップスケール",
|
||||||
"scale": "スケール",
|
"scale": "スケール"
|
||||||
"creativity": "創造性",
|
|
||||||
"exceedsMaxSize": "アップスケール設定が最大サイズ制限を超えています",
|
|
||||||
"exceedsMaxSizeDetails": "アップスケールの上限は{{max Upscale Dimension}} x {{max Upscale Dimension}}ピクセルです。画像を小さくするか、スケールの選択範囲を小さくしてください。",
|
|
||||||
"structure": "構造",
|
|
||||||
"postProcessingMissingModelWarning": "後処理 (img2img) モデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
|
||||||
"missingModelsWarning": "必要なモデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
|
||||||
"mainModelDesc": "メインモデル(SD1.5またはSDXLアーキテクチャ)",
|
|
||||||
"tileControlNetModelDesc": "選択したメインモデルアーキテクチャのタイルコントロールネットモデル",
|
|
||||||
"upscaleModelDesc": "アップスケール(img2img)モデル",
|
|
||||||
"missingUpscaleInitialImage": "アップスケール用の初期画像がありません",
|
|
||||||
"missingUpscaleModel": "アップスケールモデルがありません",
|
|
||||||
"missingTileControlNetModel": "有効なタイル コントロールネットモデルがインストールされていません",
|
|
||||||
"incompatibleBaseModel": "アップスケーリングにサポートされていないメインモデルアーキテクチャです",
|
|
||||||
"incompatibleBaseModelDesc": "アップスケーリングはSD1.5およびSDXLアーキテクチャモデルでのみサポートされています。アップスケーリングを有効にするには、メインモデルを変更してください。"
|
|
||||||
},
|
},
|
||||||
"sdxl": {
|
"sdxl": {
|
||||||
"denoisingStrength": "ノイズ除去強度",
|
"denoisingStrength": "ノイズ除去強度",
|
||||||
@@ -2429,34 +1891,7 @@
|
|||||||
"minimum": "最小",
|
"minimum": "最小",
|
||||||
"publish": "公開",
|
"publish": "公開",
|
||||||
"unpublish": "非公開",
|
"unpublish": "非公開",
|
||||||
"publishedWorkflowInputs": "インプット",
|
"publishedWorkflowInputs": "インプット"
|
||||||
"workflowLocked": "ワークフローがロックされました",
|
|
||||||
"workflowLockedPublished": "公開済みのワークフローは編集用にロックされています。\nワークフローを非公開にして編集したり、コピーを作成したりできます。",
|
|
||||||
"workflowLockedDuringPublishing": "公開の構成中にワークフローがロックされます。",
|
|
||||||
"selectOutputNode": "出力ノードを選択",
|
|
||||||
"changeOutputNode": "出力ノードの変更",
|
|
||||||
"unpublishableInputs": "これらの公開できない入力は省略されます",
|
|
||||||
"noPublishableInputs": "公開可能な入力はありません",
|
|
||||||
"noOutputNodeSelected": "出力ノードが選択されていません",
|
|
||||||
"cannotPublish": "ワークフローを公開できません",
|
|
||||||
"publishWarnings": "警告",
|
|
||||||
"errorWorkflowHasUnsavedChanges": "ワークフローに保存されていない変更があります",
|
|
||||||
"errorWorkflowHasUnpublishableNodes": "ワークフローにはバッチ、ジェネレータ、またはメタデータ抽出ノードがあります",
|
|
||||||
"errorWorkflowHasInvalidGraph": "ワークフロー グラフが無効です (詳細については [呼び出し] ボタンにマウスを移動してください)",
|
|
||||||
"errorWorkflowHasNoOutputNode": "出力ノードが選択されていません",
|
|
||||||
"warningWorkflowHasNoPublishableInputFields": "公開可能な入力フィールドが選択されていません - 公開されたワークフローはデフォルト値のみで実行されます",
|
|
||||||
"warningWorkflowHasUnpublishableInputFields": "ワークフローには公開できない入力がいくつかあります。これらは公開されたワークフローから省略されます",
|
|
||||||
"publishFailed": "公開失敗",
|
|
||||||
"publishFailedDesc": "ワークフローの公開中に問題が発生しました。もう一度お試しください。",
|
|
||||||
"publishSuccess": "ワークフローを公開しています",
|
|
||||||
"publishSuccessDesc": "<LinkComponent>プロジェクト ダッシュボード</LinkComponent> をチェックして進捗状況を確認してください。",
|
|
||||||
"publishInProgress": "公開中",
|
|
||||||
"publishedWorkflowIsLocked": "公開されたワークフローはロックされています",
|
|
||||||
"publishingValidationRun": "公開検証実行",
|
|
||||||
"publishingValidationRunInProgress": "公開検証の実行が進行中です。",
|
|
||||||
"publishedWorkflowsLocked": "公開済みのワークフローはロックされており、編集または実行できません。このワークフローを編集または実行するには、ワークフローを非公開にするか、コピーを保存してください。",
|
|
||||||
"selectingOutputNode": "出力ノードの選択",
|
|
||||||
"selectingOutputNodeDesc": "ノードをクリックして、ワークフローの出力ノードとして選択します。"
|
|
||||||
},
|
},
|
||||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||||
"unnamedWorkflow": "名前のないワークフロー",
|
"unnamedWorkflow": "名前のないワークフロー",
|
||||||
@@ -2519,23 +1954,15 @@
|
|||||||
"models": "モデル",
|
"models": "モデル",
|
||||||
"canvas": "キャンバス",
|
"canvas": "キャンバス",
|
||||||
"metadata": "メタデータ",
|
"metadata": "メタデータ",
|
||||||
"queue": "キュー",
|
"queue": "キュー"
|
||||||
"logNamespaces": "ログのネームスペース",
|
|
||||||
"dnd": "ドラッグ&ドロップ",
|
|
||||||
"config": "構成",
|
|
||||||
"generation": "生成",
|
|
||||||
"events": "イベント"
|
|
||||||
},
|
},
|
||||||
"logLevel": {
|
"logLevel": {
|
||||||
"debug": "Debug",
|
"debug": "Debug",
|
||||||
"info": "Info",
|
"info": "Info",
|
||||||
"error": "Error",
|
"error": "Error",
|
||||||
"fatal": "Fatal",
|
"fatal": "Fatal",
|
||||||
"warn": "Warn",
|
"warn": "Warn"
|
||||||
"logLevel": "ログレベル",
|
}
|
||||||
"trace": "追跡"
|
|
||||||
},
|
|
||||||
"enableLogging": "ログを有効にする"
|
|
||||||
},
|
},
|
||||||
"dynamicPrompts": {
|
"dynamicPrompts": {
|
||||||
"promptsPreview": "プロンプトプレビュー",
|
"promptsPreview": "プロンプトプレビュー",
|
||||||
@@ -2551,34 +1978,5 @@
|
|||||||
"dynamicPrompts": "ダイナミックプロンプト",
|
"dynamicPrompts": "ダイナミックプロンプト",
|
||||||
"loading": "ダイナミックプロンプトを生成...",
|
"loading": "ダイナミックプロンプトを生成...",
|
||||||
"maxPrompts": "最大プロンプト"
|
"maxPrompts": "最大プロンプト"
|
||||||
},
|
|
||||||
"upsell": {
|
|
||||||
"inviteTeammates": "チームメートを招待",
|
|
||||||
"professional": "プロフェッショナル",
|
|
||||||
"professionalUpsell": "InvokeのProfessional Editionでご利用いただけます。詳細については、こちらをクリックするか、invoke.com/pricingをご覧ください。",
|
|
||||||
"shareAccess": "共有アクセス"
|
|
||||||
},
|
|
||||||
"newUserExperience": {
|
|
||||||
"toGetStartedLocal": "始めるには、Invoke の実行に必要なモデルをダウンロードまたはインポートしてください。次に、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
|
||||||
"toGetStarted": "開始するには、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
|
||||||
"toGetStartedWorkflow": "開始するには、左側のフィールドに入力し、<StrongComponent>Invoke</StrongComponent> をクリックして画像を生成します。他のワークフローも試してみたい場合は、ワークフロータイトルの横にある<StrongComponent>フォルダアイコン</StrongComponent> をクリックすると、試せる他のテンプレートのリストが表示されます。",
|
|
||||||
"gettingStartedSeries": "さらに詳しいガイダンスが必要ですか? Invoke Studio の可能性を最大限に引き出すためのヒントについては、<LinkComponent>入門シリーズ</LinkComponent>をご覧ください。",
|
|
||||||
"lowVRAMMode": "最高のパフォーマンスを得るには、<LinkComponent>低 VRAM ガイド</LinkComponent>に従ってください。",
|
|
||||||
"noModelsInstalled": "モデルがインストールされていないようです。<DownloadStarterModelsButton>スターターモデルバンドルをダウンロード</DownloadStarterModelsButton>するか、<ImportModelsButton>モデルをインポート</ImportModelsButton>してください。"
|
|
||||||
},
|
|
||||||
"whatsNew": {
|
|
||||||
"whatsNewInInvoke": "Invokeの新機能",
|
|
||||||
"items": [
|
|
||||||
"インペインティング: マスクごとのノイズ レベルとノイズ除去の制限。",
|
|
||||||
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
|
|
||||||
],
|
|
||||||
"readReleaseNotes": "リリースノートを読む",
|
|
||||||
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
|
|
||||||
"watchUiUpdatesOverview": "Watch UI アップデートの概要"
|
|
||||||
},
|
|
||||||
"supportVideos": {
|
|
||||||
"supportVideos": "サポートビデオ",
|
|
||||||
"gettingStarted": "はじめる",
|
|
||||||
"watch": "ウォッチ"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -74,7 +74,7 @@
 "bulkDownloadFailed": "Tải Xuống Thất Bại",
 "bulkDownloadRequestFailed": "Có Vấn Đề Khi Đang Chuẩn Bị Tải Xuống",
 "download": "Tải Xuống",
-"dropOrUpload": "Kéo Thả Hoặc Tải Lên",
+"dropOrUpload": "$t(gallery.drop) Hoặc Tải Lên",
 "currentlyInUse": "Hình ảnh này hiện đang sử dụng các tính năng sau:",
 "deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
 "exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
@@ -111,7 +111,7 @@
 "noImageSelected": "Không Có Ảnh Được Chọn",
 "noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
 "assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
-"imagesTab": "Ảnh bạn vừa được tạo và lưu trong Invoke.",
+"imagesTab": "hình bạn vừa được tạo và lưu trong Invoke.",
 "loading": "Đang Tải",
 "oldestFirst": "Cũ Nhất Trước",
 "exitCompare": "Ngừng So Sánh",
@@ -122,8 +122,7 @@
 "boardsSettings": "Thiết Lập Bảng",
 "imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
 "assets": "Tài Nguyên",
-"images": "Hình Ảnh",
+"images": "Hình Ảnh"
-"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
 },
 "common": {
 "ipAdapter": "IP Adapter",
@@ -255,18 +254,9 @@
|
|||||||
"options_withCount_other": "{{count}} thiết lập"
|
"options_withCount_other": "{{count}} thiết lập"
|
||||||
},
|
},
|
||||||
"prompt": {
|
"prompt": {
|
||||||
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
|
"addPromptTrigger": "Thêm Prompt Trigger",
|
||||||
"compatibleEmbeddings": "Embedding Tương Thích",
|
"compatibleEmbeddings": "Embedding Tương Thích",
|
||||||
"noMatchingTriggers": "Không có trigger phù hợp",
|
"noMatchingTriggers": "Không có trigger phù hợp"
|
||||||
"generateFromImage": "Tạo sinh lệnh từ ảnh",
|
|
||||||
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
|
|
||||||
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
|
|
||||||
"expandingPrompt": "Đang mở rộng lệnh...",
|
|
||||||
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
|
|
||||||
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
|
|
||||||
"replace": "Thay Thế",
|
|
||||||
"insert": "Chèn",
|
|
||||||
"discard": "Huỷ Bỏ"
|
|
||||||
},
|
},
|
||||||
"queue": {
|
"queue": {
|
||||||
"resume": "Tiếp Tục",
|
"resume": "Tiếp Tục",
|
||||||
@@ -299,7 +289,7 @@
 "pruneTooltip": "Cắt bớt {{item_count}} mục đã hoàn tất",
 "pruneSucceeded": "Đã cắt bớt {{item_count}} mục đã hoàn tất khỏi hàng",
 "clearTooltip": "Huỷ Và Dọn Dẹp Tất Cả Mục",
-"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ và Vùng Dựng Canva sẽ được khởi động lại.",
+"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ.",
 "session": "Phiên",
 "item": "Mục",
 "resumeFailed": "Có Vấn Đề Khi Tiếp Tục Bộ Xử Lý",
@@ -343,14 +333,13 @@
 "retrySucceeded": "Mục Đã Thử Lại",
 "retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
 "retryItem": "Thử Lại Mục",
-"credits": "Nguồn",
+"credits": "Nguồn"
-"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại"
 },
 "hotkeys": {
 "canvas": {
 "fitLayersToCanvas": {
 "title": "Xếp Vừa Layers Vào Canvas",
-"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer nhìn thấy dược."
+"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer."
 },
 "setZoomTo800Percent": {
 "desc": "Phóng to canvas lên 800%.",
@@ -464,34 +453,6 @@
|
|||||||
"applyFilter": {
|
"applyFilter": {
|
||||||
"title": "Áp Dụng Bộ Lộc",
|
"title": "Áp Dụng Bộ Lộc",
|
||||||
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
|
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
|
||||||
},
|
|
||||||
"settings": {
|
|
||||||
"behavior": "Hành Vi",
|
|
||||||
"display": "Hiển Thị",
|
|
||||||
"grid": "Lưới",
|
|
||||||
"debug": "Gỡ Lỗi"
|
|
||||||
},
|
|
||||||
"toggleNonRasterLayers": {
|
|
||||||
"title": "Bật/Tắt Layer Không Thuộc Dạng Raster",
|
|
||||||
"desc": "Hiện hoặc ẩn tất cả layer không thuộc dạng raster (Layer Điều Khiển Được, Lớp Phủ Inpaint, Chỉ Dẫn Khu Vực)."
|
|
||||||
},
|
|
||||||
"invertMask": {
|
|
||||||
"title": "Đảo Ngược Lớp Phủ",
|
|
||||||
"desc": "Đảo ngược lớp phủ inpaint được chọn, tạo một lớp phủ mới với độ trong suốt đối nghịch."
|
|
||||||
},
|
|
||||||
"fitBboxToMasks": {
|
|
||||||
"title": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
|
|
||||||
"desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào lớp phủ inpaint nhìn thấy được"
|
|
||||||
},
|
|
||||||
"applySegmentAnything": {
|
|
||||||
"title": "Áp Dụng Segment Anything",
|
|
||||||
"desc": "Áp dụng lớp phủ Segment Anything hiện tại.",
|
|
||||||
"key": "enter"
|
|
||||||
},
|
|
||||||
"cancelSegmentAnything": {
|
|
||||||
"title": "Huỷ Segment Anything",
|
|
||||||
"desc": "Huỷ hoạt động Segment Anything hiện tại.",
|
|
||||||
"key": "esc"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"workflows": {
|
"workflows": {
|
||||||
@@ -621,10 +582,6 @@
 "clearSelection": {
 "desc": "Xoá phần lựa chọn hiện tại nếu có.",
 "title": "Xoá Phần Lựa Chọn"
-},
-"starImage": {
-"title": "Dấu/Huỷ Sao Hình Ảnh",
-"desc": "Đánh dấu sao hoặc huỷ đánh dấu sao ảnh được chọn."
 }
 },
 "app": {
@@ -684,11 +641,6 @@
 "selectModelsTab": {
 "desc": "Chọn tab Model (Mô Hình).",
 "title": "Chọn Tab Model"
-},
-"selectGenerateTab": {
-"title": "Chọn Tab Tạo Sinh",
-"desc": "Chọn tab Tạo Sinh.",
-"key": "1"
 }
 },
 "searchHotkeys": "Tìm Phím tắt",
@@ -743,7 +695,7 @@
 "cancel": "Huỷ",
 "huggingFace": "HuggingFace (HF)",
 "huggingFacePlaceholder": "chủ-sỡ-hữu/tên-model",
-"includesNModels": "Thêm vào {{n}} model và dependency của nó.",
+"includesNModels": "Thêm vào {{n}} model và dependency của nó",
 "localOnly": "chỉ ở trên máy chủ",
 "manual": "Thủ Công",
 "convertToDiffusersHelpText4": "Đây là quá trình diễn ra chỉ một lần. Nó có thể tốn tầm 30-60 giây tuỳ theo thông số kỹ thuật của máy tính.",
@@ -790,7 +742,7 @@
 "simpleModelPlaceholder": "Url hoặc đường đẫn đến tệp hoặc thư mục chứa diffusers trong máy chủ",
 "selectModel": "Chọn Model",
 "spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
-"starterBundles": "Gói Khởi Đầu",
+"starterBundles": "Quà Tân Thủ",
 "vae": "VAE",
 "urlOrLocalPath": "URL / Đường Dẫn",
 "triggerPhrases": "Từ Ngữ Kích Hoạt",
@@ -842,30 +794,7 @@
|
|||||||
"manageModels": "Quản Lý Model",
|
"manageModels": "Quản Lý Model",
|
||||||
"hfTokenReset": "Làm Mới HF Token",
|
"hfTokenReset": "Làm Mới HF Token",
|
||||||
"relatedModels": "Model Liên Quan",
|
"relatedModels": "Model Liên Quan",
|
||||||
"showOnlyRelatedModels": "Liên Quan",
|
"showOnlyRelatedModels": "Liên Quan"
|
||||||
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
|
|
||||||
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
|
|
||||||
"nToInstall": "Còn {{count}} để tải",
|
|
||||||
"nAlreadyInstalled": "Có {{count}} đã tải",
|
|
||||||
"bundleAlreadyInstalled": "Gói đã được cài sẵn",
|
|
||||||
"bundleAlreadyInstalledDesc": "Tất cả model trong gói {{bundleName}} đã được cài sẵn.",
|
|
||||||
"launchpadTab": "Launchpad",
|
|
||||||
"launchpad": {
|
|
||||||
"welcome": "Chào mừng đến Trình Quản Lý Model",
|
|
||||||
"description": "Invoke yêu cầu tải model nhằm tối ưu hoá các tính năng trên nền tảng. Chọn tải các phương án thủ công hoặc khám phá các model khởi đầu thích hợp.",
|
|
||||||
"manualInstall": "Tải Thủ Công",
|
|
||||||
"urlDescription": "Tải model bằng URL hoặc đường dẫn trên máy. Phù hợp để cụ thể model muốn thêm vào.",
|
|
||||||
"huggingFaceDescription": "Duyệt và cài đặt model từ các repository trên HuggingFace.",
|
|
||||||
"scanFolderDescription": "Quét một thư mục trên máy để tự động tra và tải model.",
|
|
||||||
"recommendedModels": "Model Khuyến Nghị",
|
|
||||||
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
|
|
||||||
"quickStart": "Gói Khởi Đầu Nhanh",
|
|
||||||
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
|
|
||||||
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
|
|
||||||
"stableDiffusion15": "Stable Diffusion 1.5",
|
|
||||||
"sdxl": "SDXL",
|
|
||||||
"fluxDev": "FLUX.1 dev"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"guidance": "Hướng Dẫn",
|
"guidance": "Hướng Dẫn",
|
||||||
@@ -873,7 +802,7 @@
 "imageDetails": "Chi Tiết Ảnh",
 "createdBy": "Được Tạo Bởi",
 "parsingFailed": "Lỗi Cú Pháp",
-"canvasV2Metadata": "Layer Canvas",
+"canvasV2Metadata": "Canvas",
 "parameterSet": "Dữ liệu tham số {{parameter}}",
 "positivePrompt": "Lệnh Tích Cực",
 "recallParameter": "Gợi Nhớ {{label}}",
@@ -1118,23 +1047,7 @@
|
|||||||
"unknownField_withName": "Vùng Dữ Liệu Không Rõ \"{{name}}\"",
|
"unknownField_withName": "Vùng Dữ Liệu Không Rõ \"{{name}}\"",
|
||||||
"unexpectedField_withName": "Sai Vùng Dữ Liệu \"{{name}}\"",
|
"unexpectedField_withName": "Sai Vùng Dữ Liệu \"{{name}}\"",
|
||||||
"unknownFieldEditWorkflowToFix_withName": "Workflow chứa vùng dữ liệu không rõ \"{{name}}\".\nHãy biên tập workflow để sửa lỗi.",
|
"unknownFieldEditWorkflowToFix_withName": "Workflow chứa vùng dữ liệu không rõ \"{{name}}\".\nHãy biên tập workflow để sửa lỗi.",
|
||||||
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\"",
|
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\""
|
||||||
"layout": {
|
|
||||||
"autoLayout": "Bố Cục Tự Động",
|
|
||||||
"layeringStrategy": "Chiến Lược Phân Layer",
|
|
||||||
"networkSimplex": "Network Simplex",
|
|
||||||
"longestPath": "Đường Đi Dài Nhất",
|
|
||||||
"nodeSpacing": "Khoảng Cách Node",
|
|
||||||
"layerSpacing": "Khoảng Cách Layer",
|
|
||||||
"layoutDirection": "Hướng Bố Cục",
|
|
||||||
"layoutDirectionRight": "Phải",
|
|
||||||
"layoutDirectionDown": "Xuống",
|
|
||||||
"alignment": "Căn Chỉnh Node",
|
|
||||||
"alignmentUL": "Trên Cùng Bên Trái",
|
|
||||||
"alignmentDL": "Dưới Cùng Bên Trái",
|
|
||||||
"alignmentUR": "Trên Cùng Bên Phải",
|
|
||||||
"alignmentDR": "Dưới Cùng Bên Phải"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"popovers": {
|
"popovers": {
|
||||||
"paramCFGRescaleMultiplier": {
|
"paramCFGRescaleMultiplier": {
|
||||||
@@ -1561,20 +1474,6 @@
|
|||||||
"Lát khối liền mạch bức ảnh theo trục ngang."
|
"Lát khối liền mạch bức ảnh theo trục ngang."
|
||||||
],
|
],
|
||||||
"heading": "Lát Khối Liền Mạch Trục X"
|
"heading": "Lát Khối Liền Mạch Trục X"
|
||||||
},
|
|
||||||
"tileSize": {
|
|
||||||
"heading": "Kích Thước Khối",
|
|
||||||
"paragraphs": [
|
|
||||||
"Điều chỉnh kích thước của khối trong quá trình upscale. Khối càng lớn, bộ nhớ được sử dụng càng nhiều, nhưng có thể tạo sinh ảnh tốt hơn.",
|
|
||||||
"Model SD1.5 mặt định là 768, trong khi SDXL mặc định là 1024. Giảm kích thước khối nếu các gặp vấn đề bộ nhớ."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"tileOverlap": {
|
|
||||||
"heading": "Chồng Chéo Khối",
|
|
||||||
"paragraphs": [
|
|
||||||
"Điều chỉnh sự chồng chéo giữa các khối liền kề trong quá trình upscale. Giá trị chồng chép lớn giúp giảm sự rõ nét của các chỗ nối nhau, nhưng ngốn nhiều bộ nhớ hơn.",
|
|
||||||
"Giá trị mặc định (128) hoạt động tốt với đa số trường hợp, nhưng bạn có thể điều chỉnh cho phù hợp với nhu cầu cụ thể và hạn chế về bộ nhớ."
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"models": {
|
"models": {
|
||||||
@@ -1588,8 +1487,7 @@
|
|||||||
"defaultVAE": "VAE Mặc Định",
|
"defaultVAE": "VAE Mặc Định",
|
||||||
"noMatchingModels": "Không có Model phù hợp",
|
"noMatchingModels": "Không có Model phù hợp",
|
||||||
"noModelsAvailable": "Không có model",
|
"noModelsAvailable": "Không có model",
|
||||||
"selectModel": "Chọn Model",
|
"selectModel": "Chọn Model"
|
||||||
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
|
|
||||||
},
|
},
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
|
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
|
||||||
@@ -1640,10 +1538,7 @@
|
|||||||
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||||
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||||
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||||
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
|
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
|
||||||
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với LUX Kontext thông qua BFL API",
|
|
||||||
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
|
|
||||||
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
|
|
||||||
},
|
},
|
||||||
"cfgScale": "Thang CFG",
|
"cfgScale": "Thang CFG",
|
||||||
"useSeed": "Dùng Hạt Giống",
|
"useSeed": "Dùng Hạt Giống",
|
||||||
@@ -1974,8 +1869,7 @@
|
|||||||
"canvasGroup": "Canvas",
|
"canvasGroup": "Canvas",
|
||||||
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
|
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
|
||||||
"copyToClipboard": "Sao Chép Vào Clipboard",
|
"copyToClipboard": "Sao Chép Vào Clipboard",
|
||||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard",
|
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
|
||||||
"newResizedControlLayer": "Layer Điều Khiển Được Đã Chỉnh Kích Thước Mới"
|
|
||||||
},
|
},
|
||||||
"stagingArea": {
|
"stagingArea": {
|
||||||
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
|
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
|
||||||
@@ -2156,11 +2050,7 @@
|
|||||||
},
|
},
|
||||||
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
|
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
|
||||||
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
|
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
|
||||||
"isolatedPreview": "Xem Trước Phần Cô Lập",
|
"isolatedPreview": "Xem Trước Phần Cô Lập"
|
||||||
"saveAllImagesToGallery": {
|
|
||||||
"label": "Chuyển Sản Phẩm Tạo Sinh Mới Vào Thư Viện Ảnh",
|
|
||||||
"alert": "Đang chuyển sản phẩm tạo sinh mới vào Thư Viện Ảnh, bỏ qua Canvas"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"tool": {
|
"tool": {
|
||||||
"eraser": "Tẩy",
|
"eraser": "Tẩy",
|
||||||
@@ -2172,8 +2062,8 @@
|
|||||||
"colorPicker": "Chọn Màu"
|
"colorPicker": "Chọn Màu"
|
||||||
},
|
},
|
||||||
"mergingLayers": "Đang gộp layer",
|
"mergingLayers": "Đang gộp layer",
|
||||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ thư viện ảnh vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton> hoặc kéo ảnh từ thư viện ảnh vào Ảnh Mẫu để bắt đầu.",
|
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||||
"useImage": "Dùng Hình Ảnh",
|
"useImage": "Dùng Hình Ảnh",
|
||||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||||
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
|
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
|
||||||
@@ -2225,20 +2115,7 @@
|
|||||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
|
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
|
|
||||||
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
|
|
||||||
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
|
|
||||||
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
|
|
||||||
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
|
|
||||||
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
|
|
||||||
"autoSwitch": {
|
|
||||||
"off": "Tắt",
|
|
||||||
"switchOnStart": "Khi Bắt Đầu",
|
|
||||||
"switchOnFinish": "Khi Kết Thúc"
|
|
||||||
},
|
|
||||||
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
|
|
||||||
"invertMask": "Đảo Ngược Lớp Phủ"
|
|
||||||
},
|
},
|
||||||
"stylePresets": {
|
"stylePresets": {
|
||||||
"negativePrompt": "Lệnh Tiêu Cực",
|
"negativePrompt": "Lệnh Tiêu Cực",
|
||||||
@@ -2284,8 +2161,7 @@
|
|||||||
"deleteImage": "Xoá Hình Ảnh",
|
"deleteImage": "Xoá Hình Ảnh",
|
||||||
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
|
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
|
||||||
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
|
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
|
||||||
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh",
|
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh"
|
||||||
"togglePromptPreviews": "Bật/Tắt Xem Trước Lệnh"
|
|
||||||
},
|
},
|
||||||
"system": {
|
"system": {
|
||||||
"enableLogging": "Bật Chế Độ Ghi Log",
|
"enableLogging": "Bật Chế Độ Ghi Log",
|
||||||
@@ -2381,34 +2257,7 @@
|
|||||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
|
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
|
|
||||||
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
|
|
||||||
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
|
|
||||||
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
|
|
||||||
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
|
||||||
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
|
|
||||||
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
|
||||||
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
|
|
||||||
"canvasTooLarge": "Canvas Quá Lớn",
|
|
||||||
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng chủa canvas và thử lại.",
|
|
||||||
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
|
|
||||||
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
|
|
||||||
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
|
|
||||||
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
|
|
||||||
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
|
|
||||||
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
|
|
||||||
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
|
|
||||||
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
|
|
||||||
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại.",
|
|
||||||
"maskInverted": "Đã Đảo Ngược Lớp Phủ",
|
|
||||||
"maskInvertFailed": "Thất Bại Khi Đảo Ngược Lớp Phủ",
|
|
||||||
"noVisibleMasks": "Không Có Lớp Phủ Đang Hiển Thị",
|
|
||||||
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược",
|
|
||||||
"noInpaintMaskSelected": "Không Có Lớp Phủ Inpant Được Chọn",
|
|
||||||
"noInpaintMaskSelectedDesc": "Chọn một lớp phủ inpaint để đảo ngược",
|
|
||||||
"invalidBbox": "Hộp Giới Hạn Không Hợp Lệ",
|
|
||||||
"invalidBboxDesc": "Hợp giới hạn có kích thước không hợp lệ"
|
|
||||||
},
|
},
|
||||||
"ui": {
|
"ui": {
|
||||||
"tabs": {
|
"tabs": {
|
||||||
@@ -2422,55 +2271,6 @@
|
|||||||
"queue": "Queue (Hàng Đợi)",
|
"queue": "Queue (Hàng Đợi)",
|
||||||
"workflows": "Workflow (Luồng Làm Việc)",
|
"workflows": "Workflow (Luồng Làm Việc)",
|
||||||
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
|
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
|
||||||
},
|
|
||||||
"launchpad": {
|
|
||||||
"workflowsTitle": "Đi sâu hơn với Workflow.",
|
|
||||||
"upscalingTitle": "Upscale và thêm chi tiết.",
|
|
||||||
"canvasTitle": "Biên tập và làm đẹp trên Canvas.",
|
|
||||||
"generateTitle": "Tạo sinh ảnh từ lệnh chữ.",
|
|
||||||
"modelGuideText": "Muốn biết lệnh nào tốt nhất cho từng model chứ?",
|
|
||||||
"modelGuideLink": "Xem thêm Hướng Dẫn Model.",
|
|
||||||
"workflows": {
|
|
||||||
"description": "Workflow là các template tái sử dụng được sẽ tự động hoá các tác vụ tạo sinh ảnh, cho phép bạn nhanh chóng thực hiện cách thao tác phức tạp và nhận được kết quả nhất quán.",
|
|
||||||
"learnMoreLink": "Học thêm cách tạo ra workflow",
|
|
||||||
"browseTemplates": {
|
|
||||||
"title": "Duyệt Template Workflow",
|
|
||||||
"description": "Chọn từ các workflow có sẵn cho những tác vụ cơ bản"
|
|
||||||
},
|
|
||||||
"createNew": {
|
|
||||||
"title": "Tạo workflow mới",
|
|
||||||
"description": "Tạo workflow mới từ ban đầu"
|
|
||||||
},
|
|
||||||
"loadFromFile": {
|
|
||||||
"title": "Tải workflow từ tệp",
|
|
||||||
"description": "Tải lên workflow để bắt đầu với những thiết lập sẵn có"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"upscaling": {
|
|
||||||
"uploadImage": {
|
|
||||||
"title": "Tải Ảnh Để Upscale",
|
|
||||||
"description": "Nhấp hoặc kéo ảnh để upscale (JPG, PNG, WebP lên đến 100MB)"
|
|
||||||
},
|
|
||||||
"replaceImage": {
|
|
||||||
"title": "Thay Thế Ảnh Hiện Tại",
|
|
||||||
"description": "Nhấp hoặc kéo ảnh mới để thay thế cái hiện tại"
|
|
||||||
},
|
|
||||||
"imageReady": {
|
|
||||||
"title": "Ảnh Đã Sẵn Sàng",
|
|
||||||
"description": "Bấm 'Kích Hoạt' để chuẩn bị upscale"
|
|
||||||
},
|
|
||||||
"readyToUpscale": {
|
|
||||||
"title": "Chuẩn bị upscale!",
|
|
||||||
"description": "Điều chỉnh thiết lập bên dưới, sau đó bấm vào nút 'Khởi Động' để chuẩn bị upscale ảnh."
|
|
||||||
},
|
|
||||||
"upscaleModel": "Model Upscale",
|
|
||||||
"model": "Model",
|
|
||||||
"helpText": {
|
|
||||||
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
|
|
||||||
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
|
|
||||||
},
|
|
||||||
"scale": "Kích Thước"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"workflows": {
|
"workflows": {
|
||||||
@@ -2623,10 +2423,7 @@
|
|||||||
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
|
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
|
||||||
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
|
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
|
||||||
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
|
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
|
||||||
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale.",
|
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
|
||||||
"tileControl": "Điều Chỉnh Khối",
|
|
||||||
"tileSize": "Kích Thước Khối",
|
|
||||||
"tileOverlap": "Chồng Chéo Khối"
|
|
||||||
},
|
},
|
||||||
"newUserExperience": {
|
"newUserExperience": {
|
||||||
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
|
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
|
||||||
@@ -2642,10 +2439,8 @@
|
|||||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||||
"items": [
|
"items": [
|
||||||
"Thiết lập mới để gửi các sản phẩm tạo sinh từ Canvas trực tiếp đến Thư Viện Ảnh.",
|
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||||
"Chức năng mới Đảo Ngược Lớp Phủ (Shift+V) và khả năng Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ (Shift+B).",
|
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||||
"Mở rộng hỗ trợ cho Ảnh Minh Hoạ và thiết lập model.",
|
|
||||||
"Nhiều bản cập nhật và sửa lỗi chất lượng"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"upsell": {
|
"upsell": {
|
||||||
@@ -2657,18 +2452,64 @@
|
|||||||
"supportVideos": {
|
"supportVideos": {
|
||||||
"supportVideos": "Video Hỗ Trợ",
|
"supportVideos": "Video Hỗ Trợ",
|
||||||
"gettingStarted": "Bắt Đầu Làm Quen",
|
"gettingStarted": "Bắt Đầu Làm Quen",
|
||||||
"watch": "Xem",
|
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
|
||||||
"studioSessionsDesc": "Tham gia <DiscordLink /> để xem các buổi phát trực tiếp và đặt câu hỏi. Các phiên được đăng lên trên playlist các tuần tiếp theo.",
|
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
|
||||||
"videos": {
|
"videos": {
|
||||||
"gettingStarted": {
|
"howDoIDoImageToImageTransformation": {
|
||||||
"title": "Bắt Đầu Với Invoke",
|
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
|
||||||
"description": "Hoàn thành các video bao hàm mọi thứ bạn cần biết để bắt đầu với Invoke, từ tạo bức ảnh đầu tiên đến các kỹ thuật phức tạp khác."
|
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
|
||||||
},
|
},
|
||||||
"studioSessions": {
|
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||||
"title": "Phiên Studio",
|
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
|
||||||
"description": "Đào sâu vào các phiên họp để khám phá những tính năng nâng cao của Invoke, sáng tạo workflow, và thảo luận cộng đồng."
|
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
|
||||||
|
},
|
||||||
|
"creatingAndComposingOnInvokesControlCanvas": {
|
||||||
|
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
|
||||||
|
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
|
||||||
|
},
|
||||||
|
"upscaling": {
|
||||||
|
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
|
||||||
|
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
|
||||||
|
},
|
||||||
|
"howDoIGenerateAndSaveToTheGallery": {
|
||||||
|
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
|
||||||
|
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
|
||||||
|
},
|
||||||
|
"howDoIEditOnTheCanvas": {
|
||||||
|
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
|
||||||
|
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
|
||||||
|
},
|
||||||
|
"howDoIUseControlNetsAndControlLayers": {
|
||||||
|
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
|
||||||
|
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
|
||||||
|
},
|
||||||
|
"howDoIUseInpaintMasks": {
|
||||||
|
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
|
||||||
|
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
|
||||||
|
},
|
||||||
|
"howDoIOutpaint": {
|
||||||
|
"title": "Làm Sao Để Tôi Outpaint?",
|
||||||
|
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
|
||||||
|
},
|
||||||
|
"creatingYourFirstImage": {
|
||||||
|
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
|
||||||
|
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
|
||||||
|
},
|
||||||
|
"usingControlLayersAndReferenceGuides": {
|
||||||
|
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
|
||||||
|
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
|
||||||
|
},
|
||||||
|
"understandingImageToImageAndDenoising": {
|
||||||
|
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
|
||||||
|
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
|
||||||
|
},
|
||||||
|
"exploringAIModelsAndConceptAdapters": {
|
||||||
|
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
|
||||||
|
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
|
"controlCanvas": "Điều Khiển Canvas",
|
||||||
|
"watch": "Xem"
|
||||||
},
|
},
|
||||||
"modelCache": {
|
"modelCache": {
|
||||||
"clearSucceeded": "Cache Model Đã Được Dọn",
|
"clearSucceeded": "Cache Model Đã Được Dọn",
|
||||||
|
|||||||
@@ -3,9 +3,9 @@ import { useStore } from '@nanostores/react';
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction';
- import { clearStorage } from 'app/store/enhancers/reduxRemember/driver';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
+ import { useClearStorage } from 'common/hooks/useClearStorage';
import { AppContent } from 'features/ui/components/AppContent';
import { memo, useCallback } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
@@ -21,24 +21,25 @@ interface Props {

const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
const didStudioInit = useStore($didStudioInit);
+ const clearStorage = useClearStorage();

const handleReset = useCallback(() => {
clearStorage();
location.reload();
return false;
- }, []);
+ }, [clearStorage]);

return (
- <ThemeLocaleProvider>
+ <ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
- <ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
+ <ThemeLocaleProvider>
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
<AppContent />
{!didStudioInit && <Loading />}
</Box>
<GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
<GlobalModalIsolator />
- </ErrorBoundary>
+ </ThemeLocaleProvider>
- </ThemeLocaleProvider>
+ </ErrorBoundary>
);
};

@@ -2,7 +2,6 @@ import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { setupListeners } from '@reduxjs/toolkit/query';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
- import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection';
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
import { useLogger } from 'app/logging/useLogger';
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
@@ -16,8 +15,6 @@ import { useDndMonitor } from 'features/dnd/useDndMonitor';
import { useDynamicPromptsWatcher } from 'features/dynamicPrompts/hooks/useDynamicPromptsWatcher';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
- import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
- import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators';
import { useReadinessWatcher } from 'features/queue/store/readiness';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
@@ -50,13 +47,10 @@ export const GlobalHookIsolator = memo(
useCloseChakraTooltipsOnDragFix();
useNavigationApi();
useDndMonitor();
- useSyncNodeErrors();
- useSyncLangDirection();

// Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
// and/or in progress canvas sessions.
useGetQueueCountsByDestinationQuery(queueCountArg);
- useSyncExecutionState();

useEffect(() => {
i18n.changeLanguage(language);

@@ -1,6 +1,10 @@
import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
+ import {
+ NewCanvasSessionDialog,
+ NewGallerySessionDialog,
+ } from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
@@ -46,6 +50,8 @@ export const GlobalModalIsolator = memo(() => {
<RefreshAfterResetModal />
<DeleteBoardModal />
<GlobalImageHotkeys />
+ <NewGallerySessionDialog />
+ <NewCanvasSessionDialog />
<ImageContextMenu />
<FullscreenDropzone />
<VideosModal />

@@ -5,7 +5,6 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
- import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
@@ -36,7 +35,7 @@ import {
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
- import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
+ import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
import { Provider } from 'react-redux';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
import { $socketOptions } from 'services/events/stores';
@@ -71,7 +70,6 @@ interface Props extends PropsWithChildren {
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
- storagePersistThrottle?: number;
}

const InvokeAIUI = ({
@@ -98,11 +96,7 @@
loggingOverrides,
onClickGoToModelManager,
whatsNew,
- storagePersistThrottle = 2000,
}: Props) => {
- const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
- const [didRehydrate, setDidRehydrate] = useState(false);

useLayoutEffect(() => {
/*
* We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
@@ -314,30 +308,22 @@
};
}, [isDebugging]);

+ const store = useMemo(() => {
+ return createStore(projectId);
+ }, [projectId]);

useEffect(() => {
- const onRehydrated = () => {
- setDidRehydrate(true);
- };
- const store = createStore({ persist: true, persistThrottle: storagePersistThrottle, onRehydrated });
- setStore(store);
$store.set(store);
if (import.meta.env.MODE === 'development') {
window.$store = $store;
}
- const removeStorageListeners = addStorageListeners();
+ () => {
- return () => {
- removeStorageListeners();
- setStore(undefined);
$store.set(undefined);
if (import.meta.env.MODE === 'development') {
window.$store = undefined;
}
};
- }, [storagePersistThrottle]);
+ }, [store]);

- if (!store || !didRehydrate) {
- return <Loading />;
- }

return (
<React.StrictMode>

@@ -3,39 +3,43 @@ import 'overlayscrollbars/overlayscrollbars.css';
import '@xyflow/react/dist/base.css';
import 'common/components/OverlayScrollbars/overlayscrollbars.css';

- import { ChakraProvider, DarkMode, extendTheme, theme as baseTheme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
+ import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
- import { useStore } from '@nanostores/react';
- import { $direction } from 'app/hooks/useSyncLangDirection';
import type { ReactNode } from 'react';
- import { memo, useMemo } from 'react';
+ import { memo, useEffect, useMemo } from 'react';
+ import { useTranslation } from 'react-i18next';

type ThemeLocaleProviderProps = {
children: ReactNode;
};

- const buildTheme = (direction: 'ltr' | 'rtl') => {
- return extendTheme({
- ...baseTheme,
- direction,
- shadows: {
- ...baseTheme.shadows,
- selected:
- 'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
- hoverSelected:
- 'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
- hoverUnselected:
- 'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
- selectedForCompare:
- 'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
- hoverSelectedForCompare:
- 'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
- },
- });
- };

function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
- const direction = useStore($direction);
+ const { i18n } = useTranslation();
- const theme = useMemo(() => buildTheme(direction), [direction]);
+ const direction = i18n.dir();

+ const theme = useMemo(() => {
+ return extendTheme({
+ ..._theme,
+ direction,
+ shadows: {
+ ..._theme.shadows,
+ selected:
+ 'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+ hoverSelected:
+ 'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+ hoverUnselected:
+ 'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
+ selectedForCompare:
+ 'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
+ hoverSelectedForCompare:
+ 'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
+ },
+ });
+ }, [direction]);

+ useEffect(() => {
+ document.body.dir = direction;
+ }, [direction]);

return (
<ChakraProvider theme={theme} toastOptions={TOAST_OPTIONS}>

@@ -21,6 +21,7 @@ import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/st
import { toast } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
+ import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { atom } from 'nanostores';
import { useCallback, useEffect } from 'react';
@@ -164,6 +165,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
// Go to the generate tab, open the launchpad
await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
store.dispatch(paramsReset());
+ store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
break;
case 'canvas':
// Go to the canvas tab, open the launchpad

@@ -1,36 +0,0 @@
- import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
- import { atom } from 'nanostores';
- import { useEffect } from 'react';
- import { useTranslation } from 'react-i18next';
-
- /**
- * Global atom storing the language direction, to be consumed by the Chakra theme.
- *
- * Why do we need this? We have a kind of catch-22:
- * - The Chakra theme needs to know the language direction to apply the correct styles.
- * - The language direction is determined by i18n and the language selection.
- * - We want our error boundary to be themed.
- * - It's possible that i18n can throw if the language selection is invalid or not supported.
- *
- * Previously, we had the logic in this file in the theme provider, which wrapped the error boundary. The error
- * was properly themed. But then, if i18n threw in the theme provider, the error boundary does not catch the
- * error. The app would crash to a white screen.
- *
- * We tried swapping the component hierarchy so that the error boundary wraps the theme provider, but then the
- * error boundary isn't themed!
- *
- * The solution is to move this i18n direction logic out of the theme provider and into a hook that we can use
- * within the error boundary. The error boundary will be themed, _and_ catch any i18n errors.
- */
- export const $direction = atom<'ltr' | 'rtl'>('ltr');
-
- export const useSyncLangDirection = () => {
- useAssertSingleton('useSyncLangDirection');
- const { i18n, t } = useTranslation();
-
- useEffect(() => {
- const direction = i18n.dir();
- $direction.set(direction);
- document.body.dir = direction;
- }, [i18n, t]);
- };
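The deleted hook above is the v6.3.0rc1 side of the catch-22 its doc comment describes. A condensed, illustrative TSX sketch of the resulting arrangement (component names here are not from the diff; the real wiring is spread across the App.tsx and ThemeLocaleProvider.tsx hunks above):

// Sketch only: theme direction comes from a plain atom, so the themed shell cannot throw via i18n,
// while the i18n-reading hook runs inside the boundary where errors are caught.
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';
import type { ReactNode } from 'react';
import { ErrorBoundary } from 'react-error-boundary';

const $direction = atom<'ltr' | 'rtl'>('ltr');

const ThemedShell = ({ children }: { children: ReactNode }) => {
  const direction = useStore($direction); // safe: no i18n call during render
  return <div dir={direction}>{children}</div>;
};

export const AppShellSketch = ({ children }: { children: ReactNode }) => (
  <ThemedShell>
    {/* a child inside the boundary would call useSyncLangDirection() to update $direction */}
    <ErrorBoundary fallback={<p>Something went wrong</p>}>{children}</ErrorBoundary>
  </ThemedShell>
);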
@@ -2,7 +2,7 @@ import { createLogWriter } from '@roarr/browser-log-writer';
import { atom } from 'nanostores';
import type { Logger, MessageSerializer } from 'roarr';
import { ROARR, Roarr } from 'roarr';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

const serializeMessage: MessageSerializer = (message) => {
return JSON.stringify(message);

@@ -1,2 +1,3 @@
+ export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
export const EMPTY_OBJECT = {};

@@ -1,209 +1,40 @@
- import { logger } from 'app/logging/logger';
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
- import { $authToken } from 'app/store/nanostores/authToken';
import { $projectId } from 'app/store/nanostores/projectId';
- import { $queueId } from 'app/store/nanostores/queueId';
import type { UseStore } from 'idb-keyval';
- import { createStore as idbCreateStore, del as idbDel, get as idbGet } from 'idb-keyval';
+ import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
+ import { atom } from 'nanostores';
import type { Driver } from 'redux-remember';
- import { serializeError } from 'serialize-error';
- import { buildV1Url, getBaseUrl } from 'services/api';
- import type { JsonObject } from 'type-fest';

- const log = logger('system');
+ // Create a custom idb-keyval store (just needed to customize the name)
+ const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));

- const getUrl = (endpoint: 'get_by_key' | 'set_by_key' | 'delete', key?: string) => {
+ export const clearIdbKeyValStore = () => {
- const baseUrl = getBaseUrl();
+ clear($idbKeyValStore.get());
- const query: Record<string, string> = {};
- if (key) {
- query['key'] = key;
- }
-
- const path = buildV1Url(`client_state/${$queueId.get()}/${endpoint}`, query);
- const url = `${baseUrl}/${path}`;
- return url;
};

- const getHeaders = () => {
+ // Create redux-remember driver, wrapping idb-keyval
- const headers = new Headers();
+ export const idbKeyValDriver: Driver = {
- const authToken = $authToken.get();
+ getItem: (key) => {
- const projectId = $projectId.get();
+ try {
- if (authToken) {
+ return get(key, $idbKeyValStore.get());
- headers.set('Authorization', `Bearer ${authToken}`);
+ } catch (originalError) {
- }
+ throw new StorageError({
- if (projectId) {
+ key,
- headers.set('project-id', projectId);
+ projectId: $projectId.get(),
- }
+ originalError,
- return headers;
+ });
- };
+ }
+ },
- // Persistence happens per slice. To track when persistence is in progress, maintain a ref count, incrementing
+ setItem: (key, value) => {
- // it when a slice is being persisted and decrementing it when the persistence is done.
+ try {
- let persistRefCount = 0;
+ return set(key, value, $idbKeyValStore.get());
+ } catch (originalError) {
- // Keep track of the last persisted state for each key to avoid unnecessary network requests.
+ throw new StorageError({
- //
+ key,
- // `redux-remember` persists individual slices of state, so we can implicity denylist a slice by not giving it a
+ value,
- // persist config.
+ projectId: $projectId.get(),
- //
+ originalError,
- // However, we may need to avoid persisting individual _fields_ of a slice. `redux-remember` does not provide a
+ });
- // way to do this directly.
+ }
- //
+ },
- // To accomplish this, we add a layer of logic on top of the `redux-remember`. In the state serializer function
- // provided to `redux-remember`, we can omit certain fields from the state that we do not want to persist. See
- // the implementation in `store.ts` for this logic.
- //
- // This logic is unknown to `redux-remember`. When an omitted field changes, it will still attempt to persist the
- // whole slice, even if the final, _serialized_ slice value is unchanged.
- //
- // To avoid unnecessary network requests, we keep track of the last persisted state for each key in this map.
- // If the value to be persisted is the same as the last persisted value, we will skip the network request.
- const lastPersistedState = new Map<string, string | undefined>();
-
- // As of v6.3.0, we use server-backed storage for client state. This replaces the previous IndexedDB-based storage,
- // which was implemented using `idb-keyval`.
- //
- // To facilitate a smooth transition, we implement a migration strategy that attempts to retrieve values from IndexedDB
- // and persist them to the new server-backed storage. This is done on a best-effort basis.
-
- // These constants were used in the previous IndexedDB-based storage implementation.
- const IDB_DB_NAME = 'invoke';
- const IDB_STORE_NAME = 'invoke-store';
- const IDB_STORAGE_PREFIX = '@@invokeai-';
-
- // Lazy store creation
- let _idbKeyValStore: UseStore | null = null;
- const getIdbKeyValStore = () => {
- if (_idbKeyValStore === null) {
- _idbKeyValStore = idbCreateStore(IDB_DB_NAME, IDB_STORE_NAME);
- }
- return _idbKeyValStore;
- };
-
- const getIdbKey = (key: string) => {
- return `${IDB_STORAGE_PREFIX}${key}`;
- };
-
- const getItem = async (key: string) => {
- try {
- const url = getUrl('get_by_key', key);
- const headers = getHeaders();
- const res = await fetch(url, { method: 'GET', headers });
- if (!res.ok) {
- throw new Error(`Response status: ${res.status}`);
- }
- const value = await res.json();
-
- // Best-effort migration from IndexedDB to the new storage system
- log.trace({ key, value }, 'Server-backed storage value retrieved');
-
- if (!value) {
- const idbKey = getIdbKey(key);
- try {
- // It's a bit tricky to query IndexedDB directly to check if value exists, so we use `idb-keyval` to do it.
- // Thing is, `idb-keyval` requires you to create a store to query it. End result - we are creating a store
- // even if we don't use it for anything besides checking if the key is present.
- const idbKeyValStore = getIdbKeyValStore();
- const idbValue = await idbGet(idbKey, idbKeyValStore);
- if (idbValue) {
- log.debug(
- { key, idbKey, idbValue },
- 'No value in server-backed storage, but found value in IndexedDB - attempting migration'
- );
- await idbDel(idbKey, idbKeyValStore);
- await setItem(key, idbValue);
- log.debug({ key, idbKey, idbValue }, 'Migration successful');
- return idbValue;
- }
- } catch (error) {
- // Just log if IndexedDB retrieval fails - this is a best-effort migration.
- log.debug(
- { key, idbKey, error: serializeError(error) } as JsonObject,
- 'Error checking for or migrating from IndexedDB'
- );
- }
- }
-
- lastPersistedState.set(key, value);
- log.trace({ key, last: lastPersistedState.get(key), next: value }, `Getting state for ${key}`);
- return value;
- } catch (originalError) {
- throw new StorageError({
- key,
- projectId: $projectId.get(),
- originalError,
- });
- }
- };
-
- const setItem = async (key: string, value: string) => {
- try {
- persistRefCount++;
- if (lastPersistedState.get(key) === value) {
- log.trace(
- { key, last: lastPersistedState.get(key), next: value },
- `Skipping persist for ${key} as value is unchanged`
- );
- return value;
- }
- log.trace({ key, last: lastPersistedState.get(key), next: value }, `Persisting state for ${key}`);
- const url = getUrl('set_by_key', key);
- const headers = getHeaders();
- const res = await fetch(url, { method: 'POST', headers, body: value });
- if (!res.ok) {
- throw new Error(`Response status: ${res.status}`);
- }
- const resultValue = await res.json();
- lastPersistedState.set(key, resultValue);
- return resultValue;
- } catch (originalError) {
- throw new StorageError({
- key,
- value,
- projectId: $projectId.get(),
- originalError,
- });
- } finally {
- persistRefCount--;
- if (persistRefCount < 0) {
- log.trace('Persist ref count is negative, resetting to 0');
- persistRefCount = 0;
- }
- }
- };
-
- export const reduxRememberDriver: Driver = { getItem, setItem };
-
- export const clearStorage = async () => {
- try {
- persistRefCount++;
- const url = getUrl('delete');
- const headers = getHeaders();
- const res = await fetch(url, { method: 'POST', headers });
- if (!res.ok) {
- throw new Error(`Response status: ${res.status}`);
- }
- } catch {
- log.error('Failed to reset client state');
- } finally {
- persistRefCount--;
- lastPersistedState.clear();
- if (persistRefCount < 0) {
- log.trace('Persist ref count is negative, resetting to 0');
- persistRefCount = 0;
- }
- }
- };
-
- export const addStorageListeners = () => {
- const onBeforeUnload = (e: BeforeUnloadEvent) => {
- if (persistRefCount > 0) {
- e.preventDefault();
- }
- };
- window.addEventListener('beforeunload', onBeforeUnload);
-
- return () => {
- window.removeEventListener('beforeunload', onBeforeUnload);
- };
};
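The hunk above removes the server-backed client-state driver and its best-effort IndexedDB migration. A minimal sketch of just that migration step, under the assumption that `setOnServer` stands in for the server-backed setItem (it is not part of the diff; the key names are illustrative):

// Sketch only: distills the migration path described in the removed comments above.
import { createStore, del, get } from 'idb-keyval';

const IDB_STORE = createStore('invoke', 'invoke-store');
const IDB_STORAGE_PREFIX = '@@invokeai-';

export const migrateKeyFromIdb = async (
  key: string,
  setOnServer: (key: string, value: string) => Promise<unknown>
): Promise<string | undefined> => {
  // Only consulted when the server returns no value for `key`.
  const idbKey = `${IDB_STORAGE_PREFIX}${key}`;
  const legacyValue = await get<string>(idbKey, IDB_STORE);
  if (legacyValue === undefined) {
    return undefined;
  }
  // Push the legacy value to the new storage, then delete it so the migration runs only once.
  await setOnServer(key, legacyValue);
  await del(idbKey, IDB_STORE);
  return legacyValue;
};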
@@ -33,9 +33,8 @@ export class StorageError extends Error {
}
}

- const log = logger('system');

export const errorHandler = (err: PersistError | RehydrateError) => {
+ const log = logger('system');
if (err instanceof PersistError) {
log.error({ error: serializeError(err) }, 'Problem persisting state');
} else if (err instanceof RehydrateError) {

@@ -0,0 +1,73 @@
|
|||||||
|
import type { TypedStartListening } from '@reduxjs/toolkit';
|
||||||
|
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
|
||||||
|
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
|
||||||
|
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
|
||||||
|
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
|
||||||
|
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
||||||
|
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
|
||||||
|
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
|
||||||
|
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
|
||||||
|
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
|
||||||
|
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
|
||||||
|
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
|
||||||
|
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
|
||||||
|
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
|
||||||
|
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
|
||||||
|
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
|
||||||
|
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
|
||||||
|
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
|
||||||
|
import type { AppDispatch, RootState } from 'app/store/store';
|
||||||
|
|
||||||
|
import { addArchivedOrDeletedBoardListener } from './listeners/addArchivedOrDeletedBoardListener';
|
||||||
|
|
||||||
|
export const listenerMiddleware = createListenerMiddleware();
|
||||||
|
|
||||||
|
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
||||||
|
|
||||||
|
const startAppListening = listenerMiddleware.startListening as AppStartListening;
|
||||||
|
|
||||||
|
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The RTK listener middleware is a lightweight alternative sagas/observables.
|
||||||
|
*
|
||||||
|
* Most side effect logic should live in a listener.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Image uploaded
|
||||||
|
addImageUploadedFulfilledListener(startAppListening);
|
||||||
|
|
||||||
|
// Image deleted
|
||||||
|
addDeleteBoardAndImagesFulfilledListener(startAppListening);
|
||||||
|
|
||||||
|
// User Invoked
|
||||||
|
addAnyEnqueuedListener(startAppListening);
|
||||||
|
addBatchEnqueuedListener(startAppListening);
|
||||||
|
|
||||||
|
// Socket.IO
|
||||||
|
addSocketConnectedEventListener(startAppListening);
|
||||||
|
|
||||||
|
// Gallery bulk download
|
||||||
|
addBulkDownloadListeners(startAppListening);
|
||||||
|
|
||||||
|
// Boards
|
||||||
|
addImageAddedToBoardFulfilledListener(startAppListening);
|
||||||
|
addImageRemovedFromBoardFulfilledListener(startAppListening);
|
||||||
|
addBoardIdSelectedListener(startAppListening);
|
||||||
|
addArchivedOrDeletedBoardListener(startAppListening);
|
||||||
|
|
||||||
|
// Node schemas
|
||||||
|
addGetOpenAPISchemaListener(startAppListening);
|
||||||
|
|
||||||
|
// Models
|
||||||
|
addModelSelectedListener(startAppListening);
|
||||||
|
|
||||||
|
// app startup
|
||||||
|
addAppStartedListener(startAppListening);
|
||||||
|
addModelsLoadedListener(startAppListening);
|
||||||
|
addAppConfigReceivedListener(startAppListening);
|
||||||
|
|
||||||
|
// Ad-hoc upscale workflow
|
||||||
|
addAdHocPostProcessingRequestedListener(startAppListening);
|
||||||
|
|
||||||
|
addSetDefaultSettingsListener(startAppListening);
|
||||||
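For orientation, a minimal sketch (a hypothetical listener, not part of this diff) of the module shape the listeners/* files follow; it also shows why, on the right-hand side of the hunks below, they import AppStartListening from this module rather than from app/store/store:

import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';

// Hypothetical action and listener, for illustration only.
const somethingHappened = createAction<{ id: string }>('app/somethingHappened');

export const addSomethingHappenedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: somethingHappened,
    effect: (action, { dispatch, getState }) => {
      // dispatch and getState are fully typed via TypedStartListening<RootState, AppDispatch>.
      // React to action.payload.id here.
    },
  });
};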
@@ -1,6 +1,6 @@
|
|||||||
import { createAction } from '@reduxjs/toolkit';
|
import { createAction } from '@reduxjs/toolkit';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
|
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
|
||||||
import { toast } from 'features/toast/toast';
|
import { toast } from 'features/toast/toast';
|
||||||
import { t } from 'i18next';
|
import { t } from 'i18next';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { isAnyOf } from '@reduxjs/toolkit';
|
import { isAnyOf } from '@reduxjs/toolkit';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||||
import {
|
import {
|
||||||
autoAddBoardIdChanged,
|
autoAddBoardIdChanged,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
|
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
|
||||||
|
|
||||||
export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => {
|
export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
|
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
|
||||||
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
|
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
|
||||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { createAction } from '@reduxjs/toolkit';
|
import { createAction } from '@reduxjs/toolkit';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
|
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
|
||||||
import { imageSelected } from 'features/gallery/store/gallerySlice';
|
import { imageSelected } from 'features/gallery/store/gallerySlice';
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { truncate } from 'es-toolkit/compat';
|
import { truncate } from 'es-toolkit/compat';
|
||||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||||
import { toast } from 'features/toast/toast';
|
import { toast } from 'features/toast/toast';
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
|
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
|
||||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||||
import { getImageUsage } from 'features/deleteImageModal/store/state';
|
import { getImageUsage } from 'features/deleteImageModal/store/state';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { isAnyOf } from '@reduxjs/toolkit';
|
import { isAnyOf } from '@reduxjs/toolkit';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
|
import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
|
||||||
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
|
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { toast } from 'features/toast/toast';
|
import { toast } from 'features/toast/toast';
|
||||||
import { t } from 'i18next';
|
import { t } from 'i18next';
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { parseify } from 'common/util/serialize';
|
import { parseify } from 'common/util/serialize';
|
||||||
import { size } from 'es-toolkit/compat';
|
import { size } from 'es-toolkit/compat';
|
||||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
|
|
||||||
const log = logger('gallery');
|
const log = logger('gallery');
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { imagesApi } from 'services/api/endpoints/images';
|
import { imagesApi } from 'services/api/endpoints/images';
|
||||||
|
|
||||||
const log = logger('gallery');
|
const log = logger('gallery');
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
import { isAnyOf } from '@reduxjs/toolkit';
|
import { isAnyOf } from '@reduxjs/toolkit';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening, RootState } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
|
import type { RootState } from 'app/store/store';
|
||||||
import { omit } from 'es-toolkit/compat';
|
import { omit } from 'es-toolkit/compat';
|
||||||
import { imageUploadedClientSide } from 'features/gallery/store/actions';
|
import { imageUploadedClientSide } from 'features/gallery/store/actions';
|
||||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
||||||
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||||
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
||||||
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
|
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
|
||||||
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
|
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
|
||||||
@@ -152,8 +152,7 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
|
|||||||
if (modelBase !== state.params.model?.base) {
|
if (modelBase !== state.params.model?.base) {
|
||||||
// Sync generate tab settings whenever the model base changes
|
// Sync generate tab settings whenever the model base changes
|
||||||
dispatch(syncedToOptimalDimension());
|
dispatch(syncedToOptimalDimension());
|
||||||
const isStaging = buildSelectIsStaging(selectCanvasSessionId(state))(state);
|
if (!selectIsStaging(state)) {
|
||||||
if (!isStaging) {
|
|
||||||
// Canvas tab only syncs if not staging
|
// Canvas tab only syncs if not staging
|
||||||
dispatch(bboxSyncedToOptimalDimension());
|
dispatch(bboxSyncedToOptimalDimension());
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
import type { AppDispatch, AppStartListening, RootState } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
|
import type { AppDispatch, RootState } from 'app/store/store';
|
||||||
import { controlLayerModelChanged, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
import { controlLayerModelChanged, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
||||||
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
||||||
import {
|
import {
|
||||||
@@ -14,11 +15,7 @@ import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLaye
|
|||||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||||
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
|
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
|
||||||
import { modelSelected } from 'features/parameters/store/actions';
|
import { modelSelected } from 'features/parameters/store/actions';
|
||||||
import {
|
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
|
||||||
postProcessingModelChanged,
|
|
||||||
tileControlnetModelChanged,
|
|
||||||
upscaleModelChanged,
|
|
||||||
} from 'features/parameters/store/upscaleSlice';
|
|
||||||
import {
|
import {
|
||||||
zParameterCLIPEmbedModel,
|
zParameterCLIPEmbedModel,
|
||||||
zParameterSpandrelImageToImageModel,
|
zParameterSpandrelImageToImageModel,
|
||||||
@@ -31,7 +28,6 @@ import type { AnyModelConfig } from 'services/api/types';
|
|||||||
import {
|
import {
|
||||||
isCLIPEmbedModelConfig,
|
isCLIPEmbedModelConfig,
|
||||||
isControlLayerModelConfig,
|
isControlLayerModelConfig,
|
||||||
isControlNetModelConfig,
|
|
||||||
isFluxReduxModelConfig,
|
isFluxReduxModelConfig,
|
||||||
isFluxVAEModelConfig,
|
isFluxVAEModelConfig,
|
||||||
isIPAdapterModelConfig,
|
isIPAdapterModelConfig,
|
||||||
@@ -75,7 +71,6 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
|
|||||||
handleControlAdapterModels(models, state, dispatch, log);
|
handleControlAdapterModels(models, state, dispatch, log);
|
||||||
handlePostProcessingModel(models, state, dispatch, log);
|
handlePostProcessingModel(models, state, dispatch, log);
|
||||||
handleUpscaleModel(models, state, dispatch, log);
|
handleUpscaleModel(models, state, dispatch, log);
|
||||||
handleTileControlNetModel(models, state, dispatch, log);
|
|
||||||
handleIPAdapterModels(models, state, dispatch, log);
|
handleIPAdapterModels(models, state, dispatch, log);
|
||||||
handleT5EncoderModels(models, state, dispatch, log);
|
handleT5EncoderModels(models, state, dispatch, log);
|
||||||
handleCLIPEmbedModels(models, state, dispatch, log);
|
handleCLIPEmbedModels(models, state, dispatch, log);
|
||||||
@@ -350,46 +345,6 @@ const handleUpscaleModel: ModelHandler = (models, state, dispatch, log) => {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const handleTileControlNetModel: ModelHandler = (models, state, dispatch, log) => {
|
|
||||||
const selectedTileControlNetModel = state.upscale.tileControlnetModel;
|
|
||||||
const controlNetModels = models.filter(isControlNetModelConfig);
|
|
||||||
|
|
||||||
// If the currently selected model is available, we don't need to do anything
|
|
||||||
if (selectedTileControlNetModel && controlNetModels.some((m) => m.key === selectedTileControlNetModel.key)) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The only way we have to identify a model as a tile model is by its name containing 'tile' :)
|
|
||||||
const tileModel = controlNetModels.find((m) => m.name.toLowerCase().includes('tile'));
|
|
||||||
|
|
||||||
// If we have a tile model, select it
|
|
||||||
if (tileModel) {
|
|
||||||
log.debug(
|
|
||||||
{ selectedTileControlNetModel, tileModel },
|
|
||||||
'No selected tile ControlNet model or selected model is not available, selecting tile model'
|
|
||||||
);
|
|
||||||
dispatch(tileControlnetModelChanged(tileModel));
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, select the first available ControlNet model
|
|
||||||
const firstModel = controlNetModels[0] || null;
|
|
||||||
if (firstModel) {
|
|
||||||
log.debug(
|
|
||||||
{ selectedTileControlNetModel, firstModel },
|
|
||||||
'No tile ControlNet model found, selecting first available ControlNet model'
|
|
||||||
);
|
|
||||||
dispatch(tileControlnetModelChanged(firstModel));
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// No available models, we should clear the selected model - but only if we have one selected
|
|
||||||
if (selectedTileControlNetModel) {
|
|
||||||
log.debug({ selectedTileControlNetModel }, 'Selected tile ControlNet model is not available, clearing');
|
|
||||||
dispatch(tileControlnetModelChanged(null));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
|
const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
|
||||||
const selectedT5EncoderModel = state.params.t5EncoderModel;
|
const selectedT5EncoderModel = state.params.t5EncoderModel;
|
||||||
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
|
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
|
||||||
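// Not shown in this diff: the ModelHandler type shared by the handlers in this file. Judging by
// the call sites above, it is presumably something like (an assumption, not a quote from the source):
//   type ModelHandler = (models: AnyModelConfig[], state: RootState, dispatch: AppDispatch, log: Logger) => void;
// Each handler checks whether the currently selected model of its kind is still installed and, if
// not, selects a reasonable fallback or clears the selection; the removed handleTileControlNetModel
// followed that same pattern for tile ControlNet models.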
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import type { AppStartListening } from 'app/store/store';
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { isNil } from 'es-toolkit';
|
import { isNil } from 'es-toolkit';
|
||||||
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
|
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
|
||||||
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||||
import {
|
import {
|
||||||
heightChanged,
|
heightChanged,
|
||||||
setCfgRescaleMultiplier,
|
setCfgRescaleMultiplier,
|
||||||
@@ -115,8 +115,7 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
|
|||||||
}
|
}
|
||||||
const setSizeOptions = { updateAspectRatio: true, clamp: true };
|
const setSizeOptions = { updateAspectRatio: true, clamp: true };
|
||||||
|
|
||||||
const isStaging = buildSelectIsStaging(selectCanvasSessionId(state))(state);
|
const isStaging = selectIsStaging(getState());
|
||||||
|
|
||||||
const activeTab = selectActiveTab(getState());
|
const activeTab = selectActiveTab(getState());
|
||||||
if (activeTab === 'generate') {
|
if (activeTab === 'generate') {
|
||||||
if (isParameterWidth(width)) {
|
if (isParameterWidth(width)) {
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
import { objectEquals } from '@observ33r/object-equals';
|
import { objectEquals } from '@observ33r/object-equals';
|
||||||
import { createAction } from '@reduxjs/toolkit';
|
import { createAction } from '@reduxjs/toolkit';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
|
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||||
import { $baseUrl } from 'app/store/nanostores/baseUrl';
|
import { $baseUrl } from 'app/store/nanostores/baseUrl';
|
||||||
import type { AppStartListening } from 'app/store/store';
|
|
||||||
import { atom } from 'nanostores';
|
import { atom } from 'nanostores';
|
||||||
import { api } from 'services/api';
|
import { api } from 'services/api';
|
||||||
import { modelsApi } from 'services/api/endpoints/models';
|
import { modelsApi } from 'services/api/endpoints/models';
|
||||||
|
|||||||
@@ -1,219 +1,194 @@
|
|||||||
import type { ThunkDispatch, TypedStartListening, UnknownAction } from '@reduxjs/toolkit';
|
import type { ThunkDispatch, UnknownAction } from '@reduxjs/toolkit';
|
||||||
import { addListener, combineReducers, configureStore, createAction, createListenerMiddleware } from '@reduxjs/toolkit';
|
import { autoBatchEnhancer, combineReducers, configureStore } from '@reduxjs/toolkit';
|
||||||
import { logger } from 'app/logging/logger';
|
import { logger } from 'app/logging/logger';
|
||||||
|
import { idbKeyValDriver } from 'app/store/enhancers/reduxRemember/driver';
|
||||||
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
|
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
|
||||||
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
|
|
||||||
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
|
|
||||||
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
|
|
||||||
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
|
||||||
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
|
|
||||||
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
|
|
||||||
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
|
|
||||||
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
|
|
||||||
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
|
|
||||||
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
|
|
||||||
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
|
|
||||||
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
|
|
||||||
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
|
|
||||||
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
|
|
||||||
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
|
|
||||||
import { deepClone } from 'common/util/deepClone';
|
import { deepClone } from 'common/util/deepClone';
|
||||||
import { keys, mergeWith, omit, pick } from 'es-toolkit/compat';
|
import { keys, mergeWith, omit, pick } from 'es-toolkit/compat';
|
||||||
import { changeBoardModalSliceConfig } from 'features/changeBoardModal/store/slice';
|
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
|
||||||
import { canvasSettingsSliceConfig } from 'features/controlLayers/store/canvasSettingsSlice';
|
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||||
import { canvasSliceConfig } from 'features/controlLayers/store/canvasSlice';
|
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
|
||||||
import { canvasSessionSliceConfig } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
import {
|
||||||
import { lorasSliceConfig } from 'features/controlLayers/store/lorasSlice';
|
canvasSessionSlice,
|
||||||
import { paramsSliceConfig } from 'features/controlLayers/store/paramsSlice';
|
canvasStagingAreaPersistConfig,
|
||||||
import { refImagesSliceConfig } from 'features/controlLayers/store/refImagesSlice';
|
} from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||||
import { dynamicPromptsSliceConfig } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
|
||||||
import { gallerySliceConfig } from 'features/gallery/store/gallerySlice';
|
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
|
||||||
import { modelManagerSliceConfig } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
import { refImagesPersistConfig, refImagesSlice } from 'features/controlLayers/store/refImagesSlice';
|
||||||
import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
|
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||||
import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
|
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
|
||||||
import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
|
import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
||||||
import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
|
import { nodesPersistConfig, nodesSlice, nodesUndoableConfig } from 'features/nodes/store/nodesSlice';
|
||||||
import { queueSliceConfig } from 'features/queue/store/queueSlice';
|
import { workflowLibraryPersistConfig, workflowLibrarySlice } from 'features/nodes/store/workflowLibrarySlice';
|
||||||
import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
|
import { workflowSettingsPersistConfig, workflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||||
import { configSliceConfig } from 'features/system/store/configSlice';
|
import { upscalePersistConfig, upscaleSlice } from 'features/parameters/store/upscaleSlice';
|
||||||
import { systemSliceConfig } from 'features/system/store/systemSlice';
|
import { queueSlice } from 'features/queue/store/queueSlice';
|
||||||
import { uiSliceConfig } from 'features/ui/store/uiSlice';
|
import { stylePresetPersistConfig, stylePresetSlice } from 'features/stylePresets/store/stylePresetSlice';
|
||||||
|
import { configSlice } from 'features/system/store/configSlice';
|
||||||
|
import { systemPersistConfig, systemSlice } from 'features/system/store/systemSlice';
|
||||||
|
import { uiPersistConfig, uiSlice } from 'features/ui/store/uiSlice';
|
||||||
import { diff } from 'jsondiffpatch';
|
import { diff } from 'jsondiffpatch';
|
||||||
import dynamicMiddlewares from 'redux-dynamic-middlewares';
|
import dynamicMiddlewares from 'redux-dynamic-middlewares';
|
||||||
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
|
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
|
||||||
import { REMEMBER_REHYDRATED, rememberEnhancer, rememberReducer } from 'redux-remember';
|
import { rememberEnhancer, rememberReducer } from 'redux-remember';
|
||||||
import undoable, { newHistory } from 'redux-undo';
|
import undoable from 'redux-undo';
|
||||||
import { serializeError } from 'serialize-error';
|
import { serializeError } from 'serialize-error';
|
||||||
import { api } from 'services/api';
|
import { api } from 'services/api';
|
||||||
import { authToastMiddleware } from 'services/api/authToastMiddleware';
|
import { authToastMiddleware } from 'services/api/authToastMiddleware';
|
||||||
import type { JsonObject } from 'type-fest';
|
import type { JsonObject } from 'type-fest';
|
||||||
|
|
||||||
import { reduxRememberDriver } from './enhancers/reduxRemember/driver';
|
import { STORAGE_PREFIX } from './constants';
|
||||||
import { actionSanitizer } from './middleware/devtools/actionSanitizer';
|
import { actionSanitizer } from './middleware/devtools/actionSanitizer';
|
||||||
import { actionsDenylist } from './middleware/devtools/actionsDenylist';
|
import { actionsDenylist } from './middleware/devtools/actionsDenylist';
|
||||||
import { stateSanitizer } from './middleware/devtools/stateSanitizer';
|
import { stateSanitizer } from './middleware/devtools/stateSanitizer';
|
||||||
import { addArchivedOrDeletedBoardListener } from './middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener';
|
import { listenerMiddleware } from './middleware/listenerMiddleware';
|
||||||
import { addImageUploadedFulfilledListener } from './middleware/listenerMiddleware/listeners/imageUploaded';
|
|
||||||
|
|
||||||
export const listenerMiddleware = createListenerMiddleware();
|
|
||||||
|
|
||||||
const log = logger('system');
|
const log = logger('system');
|
||||||
|
|
||||||
// When adding a slice, add the config to the SLICE_CONFIGS object below, then add the reducer to ALL_REDUCERS.
|
const allReducers = {
|
||||||
const SLICE_CONFIGS = {
|
|
||||||
[canvasSessionSliceConfig.slice.reducerPath]: canvasSessionSliceConfig,
|
|
||||||
[canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig,
|
|
||||||
[canvasSliceConfig.slice.reducerPath]: canvasSliceConfig,
|
|
||||||
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig,
|
|
||||||
[configSliceConfig.slice.reducerPath]: configSliceConfig,
|
|
||||||
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig,
|
|
||||||
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig,
|
|
||||||
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig,
|
|
||||||
[modelManagerSliceConfig.slice.reducerPath]: modelManagerSliceConfig,
|
|
||||||
[nodesSliceConfig.slice.reducerPath]: nodesSliceConfig,
|
|
||||||
[paramsSliceConfig.slice.reducerPath]: paramsSliceConfig,
|
|
||||||
[queueSliceConfig.slice.reducerPath]: queueSliceConfig,
|
|
||||||
[refImagesSliceConfig.slice.reducerPath]: refImagesSliceConfig,
|
|
||||||
[stylePresetSliceConfig.slice.reducerPath]: stylePresetSliceConfig,
|
|
||||||
[systemSliceConfig.slice.reducerPath]: systemSliceConfig,
|
|
||||||
[uiSliceConfig.slice.reducerPath]: uiSliceConfig,
|
|
||||||
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig,
|
|
||||||
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig,
|
|
||||||
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig,
|
|
||||||
};
|
|
||||||
|
|
||||||
// TS makes it really hard to dynamically create this object :/ so it's just hardcoded here.
|
|
||||||
// Remember to wrap undoable reducers in `undoable()`!
|
|
||||||
const ALL_REDUCERS = {
|
|
||||||
[api.reducerPath]: api.reducer,
|
[api.reducerPath]: api.reducer,
|
||||||
[canvasSessionSliceConfig.slice.reducerPath]: canvasSessionSliceConfig.slice.reducer,
|
[gallerySlice.name]: gallerySlice.reducer,
|
||||||
[canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig.slice.reducer,
|
[nodesSlice.name]: undoable(nodesSlice.reducer, nodesUndoableConfig),
|
||||||
// Undoable!
|
[systemSlice.name]: systemSlice.reducer,
|
||||||
[canvasSliceConfig.slice.reducerPath]: undoable(
|
[configSlice.name]: configSlice.reducer,
|
||||||
canvasSliceConfig.slice.reducer,
|
[uiSlice.name]: uiSlice.reducer,
|
||||||
canvasSliceConfig.undoableConfig?.reduxUndoOptions
|
[dynamicPromptsSlice.name]: dynamicPromptsSlice.reducer,
|
||||||
),
|
[changeBoardModalSlice.name]: changeBoardModalSlice.reducer,
|
||||||
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig.slice.reducer,
|
[modelManagerV2Slice.name]: modelManagerV2Slice.reducer,
|
||||||
[configSliceConfig.slice.reducerPath]: configSliceConfig.slice.reducer,
|
[queueSlice.name]: queueSlice.reducer,
|
||||||
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig.slice.reducer,
|
[canvasSlice.name]: undoable(canvasSlice.reducer, canvasUndoableConfig),
|
||||||
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig.slice.reducer,
|
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
|
||||||
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig.slice.reducer,
|
[upscaleSlice.name]: upscaleSlice.reducer,
|
||||||
[modelManagerSliceConfig.slice.reducerPath]: modelManagerSliceConfig.slice.reducer,
|
[stylePresetSlice.name]: stylePresetSlice.reducer,
|
||||||
// Undoable!
|
[paramsSlice.name]: paramsSlice.reducer,
|
||||||
[nodesSliceConfig.slice.reducerPath]: undoable(
|
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
|
||||||
nodesSliceConfig.slice.reducer,
|
[canvasSessionSlice.name]: canvasSessionSlice.reducer,
|
||||||
nodesSliceConfig.undoableConfig?.reduxUndoOptions
|
[lorasSlice.name]: lorasSlice.reducer,
|
||||||
),
|
[workflowLibrarySlice.name]: workflowLibrarySlice.reducer,
|
||||||
[paramsSliceConfig.slice.reducerPath]: paramsSliceConfig.slice.reducer,
|
[refImagesSlice.name]: refImagesSlice.reducer,
|
||||||
[queueSliceConfig.slice.reducerPath]: queueSliceConfig.slice.reducer,
|
|
||||||
[refImagesSliceConfig.slice.reducerPath]: refImagesSliceConfig.slice.reducer,
|
|
||||||
[stylePresetSliceConfig.slice.reducerPath]: stylePresetSliceConfig.slice.reducer,
|
|
||||||
[systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer,
|
|
||||||
[uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer,
|
|
||||||
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer,
|
|
||||||
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer,
|
|
||||||
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const rootReducer = combineReducers(ALL_REDUCERS);
|
const rootReducer = combineReducers(allReducers);
|
||||||
|
|
||||||
const rememberedRootReducer = rememberReducer(rootReducer);
|
const rememberedRootReducer = rememberReducer(rootReducer);
|
||||||
|
|
||||||
|
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
|
||||||
|
export type PersistConfig<T = any> = {
|
||||||
|
/**
|
||||||
|
* The name of the slice.
|
||||||
|
*/
|
||||||
|
name: keyof typeof allReducers;
|
||||||
|
/**
|
||||||
|
* The initial state of the slice.
|
||||||
|
*/
|
||||||
|
initialState: T;
|
||||||
|
/**
|
||||||
|
* Migrate the state to the current version during rehydration.
|
||||||
|
* @param state The rehydrated state.
|
||||||
|
* @returns A correctly-shaped state.
|
||||||
|
*/
|
||||||
|
migrate: (state: unknown) => T;
|
||||||
|
/**
|
||||||
|
* Keys to omit from the persisted state.
|
||||||
|
*/
|
||||||
|
persistDenylist: (keyof T)[];
|
||||||
|
};
|
||||||
|
|
||||||
|
const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||||
|
[galleryPersistConfig.name]: galleryPersistConfig,
|
||||||
|
[nodesPersistConfig.name]: nodesPersistConfig,
|
||||||
|
[systemPersistConfig.name]: systemPersistConfig,
|
||||||
|
[uiPersistConfig.name]: uiPersistConfig,
|
||||||
|
[dynamicPromptsPersistConfig.name]: dynamicPromptsPersistConfig,
|
||||||
|
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
|
||||||
|
[canvasPersistConfig.name]: canvasPersistConfig,
|
||||||
|
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
|
||||||
|
[upscalePersistConfig.name]: upscalePersistConfig,
|
||||||
|
[stylePresetPersistConfig.name]: stylePresetPersistConfig,
|
||||||
|
[paramsPersistConfig.name]: paramsPersistConfig,
|
||||||
|
[canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
|
||||||
|
[canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
|
||||||
|
[lorasPersistConfig.name]: lorasPersistConfig,
|
||||||
|
[workflowLibraryPersistConfig.name]: workflowLibraryPersistConfig,
|
||||||
|
[refImagesSlice.name]: refImagesPersistConfig,
|
||||||
|
};
|
||||||
|
|
||||||
const unserialize: UnserializeFunction = (data, key) => {
|
const unserialize: UnserializeFunction = (data, key) => {
|
||||||
const sliceConfig = SLICE_CONFIGS[key as keyof typeof SLICE_CONFIGS];
|
const persistConfig = persistConfigs[key as keyof typeof persistConfigs];
|
||||||
if (!sliceConfig?.persistConfig) {
|
if (!persistConfig) {
|
||||||
throw new Error(`No persist config for slice "${key}"`);
|
throw new Error(`No persist config for slice "${key}"`);
|
||||||
}
|
}
|
||||||
const { getInitialState, persistConfig, undoableConfig } = sliceConfig;
|
|
||||||
let state;
|
|
||||||
try {
|
try {
|
||||||
const initialState = getInitialState();
|
const { initialState, migrate } = persistConfig;
|
||||||
const parsed = JSON.parse(data);
|
const parsed = JSON.parse(data);
|
||||||
|
|
||||||
// strip out old keys
|
// strip out old keys
|
||||||
const stripped = pick(deepClone(parsed), keys(initialState));
|
const stripped = pick(deepClone(parsed), keys(initialState));
|
||||||
|
// run (additive) migrations
|
||||||
|
const migrated = migrate(stripped);
|
||||||
/*
|
/*
|
||||||
* Merge in initial state as default values, covering any missing keys. You might be tempted to use _.defaultsDeep,
|
* Merge in initial state as default values, covering any missing keys. You might be tempted to use _.defaultsDeep,
|
||||||
* but that merges arrays by index and partial objects by key. Using an identity function as the customizer results
|
* but that merges arrays by index and partial objects by key. Using an identity function as the customizer results
|
||||||
* in behaviour like defaultsDeep, but doesn't overwrite any values that are not undefined in the migrated state.
|
* in behaviour like defaultsDeep, but doesn't overwrite any values that are not undefined in the migrated state.
|
||||||
*/
|
*/
|
||||||
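// Illustration of the mergeWith identity customizer described in the comment above and used just
// below, with hypothetical values (lodash-style semantics, as mirrored by es-toolkit/compat):
//   mergeWith({ list: ['a'] }, { list: ['x', 'y'] }, (objVal) => objVal) -> { list: ['a'] }
//   defaultsDeep({ list: ['a'] }, { list: ['x', 'y'] })                  -> { list: ['a', 'y'] }
// Any value that is already defined in the rehydrated state is kept whole (no index-wise array
// merging); only keys that are undefined fall back to the slice's initial state.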
const unPersistDenylisted = mergeWith(stripped, initialState, (objVal) => objVal);
|
const transformed = mergeWith(migrated, initialState, (objVal) => objVal);
|
||||||
// run (additive) migrations
|
|
||||||
const migrated = persistConfig.migrate(unPersistDenylisted);
|
|
||||||
|
|
||||||
log.debug(
|
log.debug(
|
||||||
{
|
{
|
||||||
persistedData: parsed as JsonObject,
|
persistedData: parsed,
|
||||||
rehydratedData: migrated as JsonObject,
|
rehydratedData: transformed,
|
||||||
diff: diff(data, migrated) as JsonObject,
|
diff: diff(parsed, transformed) as JsonObject, // this is always serializable
|
||||||
},
|
},
|
||||||
`Rehydrated slice "${key}"`
|
`Rehydrated slice "${key}"`
|
||||||
);
|
);
|
||||||
state = migrated;
|
return transformed;
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
log.warn(
|
log.warn(
|
||||||
{ error: serializeError(err as Error) },
|
{ error: serializeError(err as Error) },
|
||||||
`Error rehydrating slice "${key}", falling back to default initial state`
|
`Error rehydrating slice "${key}", falling back to default initial state`
|
||||||
);
|
);
|
||||||
state = getInitialState();
|
return persistConfig.initialState;
|
||||||
}
|
|
||||||
|
|
||||||
// Undoable slices must be wrapped in a history!
|
|
||||||
if (undoableConfig) {
|
|
||||||
return newHistory([], state, []);
|
|
||||||
} else {
|
|
||||||
return state;
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const serialize: SerializeFunction = (data, key) => {
|
const serialize: SerializeFunction = (data, key) => {
|
||||||
const sliceConfig = SLICE_CONFIGS[key as keyof typeof SLICE_CONFIGS];
|
const persistConfig = persistConfigs[key as keyof typeof persistConfigs];
|
||||||
if (!sliceConfig?.persistConfig) {
|
if (!persistConfig) {
|
||||||
throw new Error(`No persist config for slice "${key}"`);
|
throw new Error(`No persist config for slice "${key}"`);
|
||||||
}
|
}
|
||||||
|
// Heuristic to determine if the slice is undoable - could just hardcode it in the persistConfig
|
||||||
const result = omit(
|
const isUndoable = 'present' in data && 'past' in data && 'future' in data && '_latestUnfiltered' in data;
|
||||||
sliceConfig.undoableConfig ? data.present : data,
|
const result = omit(isUndoable ? data.present : data, persistConfig.persistDenylist);
|
||||||
sliceConfig.persistConfig.persistDenylist ?? []
|
|
||||||
);
|
|
||||||
|
|
||||||
return JSON.stringify(result);
|
return JSON.stringify(result);
|
||||||
};
|
};
|
||||||
|
|
||||||
const PERSISTED_KEYS = Object.values(SLICE_CONFIGS)
|
export const createStore = (uniqueStoreKey?: string, persist = true) =>
|
||||||
.filter((sliceConfig) => !!sliceConfig.persistConfig)
|
configureStore({
|
||||||
.map((sliceConfig) => sliceConfig.slice.reducerPath);
|
|
||||||
|
|
||||||
export const createStore = (options?: { persist?: boolean; persistThrottle?: number; onRehydrated?: () => void }) => {
|
|
||||||
const store = configureStore({
|
|
||||||
reducer: rememberedRootReducer,
|
reducer: rememberedRootReducer,
|
||||||
middleware: (getDefaultMiddleware) =>
|
middleware: (getDefaultMiddleware) =>
|
||||||
getDefaultMiddleware({
|
getDefaultMiddleware({
|
||||||
// serializableCheck: false,
|
|
||||||
// immutableCheck: false,
|
|
||||||
serializableCheck: import.meta.env.MODE === 'development',
|
serializableCheck: import.meta.env.MODE === 'development',
|
||||||
immutableCheck: import.meta.env.MODE === 'development',
|
immutableCheck: import.meta.env.MODE === 'development',
|
||||||
})
|
})
|
||||||
.concat(api.middleware)
|
.concat(api.middleware)
|
||||||
.concat(dynamicMiddlewares)
|
.concat(dynamicMiddlewares)
|
||||||
.concat(authToastMiddleware)
|
.concat(authToastMiddleware)
|
||||||
// .concat(getDebugLoggerMiddleware({ withDiff: true, withNextState: true }))
|
// .concat(getDebugLoggerMiddleware())
|
||||||
.prepend(listenerMiddleware.middleware),
|
.prepend(listenerMiddleware.middleware),
|
||||||
enhancers: (getDefaultEnhancers) => {
|
enhancers: (getDefaultEnhancers) => {
|
||||||
const enhancers = getDefaultEnhancers();
|
const _enhancers = getDefaultEnhancers().concat(autoBatchEnhancer());
|
||||||
if (options?.persist) {
|
if (persist) {
|
||||||
return enhancers.prepend(
|
_enhancers.push(
|
||||||
rememberEnhancer(reduxRememberDriver, PERSISTED_KEYS, {
|
rememberEnhancer(idbKeyValDriver, keys(persistConfigs), {
|
||||||
persistThrottle: options?.persistThrottle ?? 2000,
|
persistDebounce: 300,
|
||||||
serialize,
|
serialize,
|
||||||
unserialize,
|
unserialize,
|
||||||
prefix: '',
|
prefix: uniqueStoreKey ? `${STORAGE_PREFIX}${uniqueStoreKey}-` : STORAGE_PREFIX,
|
||||||
errorHandler,
|
errorHandler,
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
} else {
|
|
||||||
return enhancers;
|
|
||||||
}
|
}
|
||||||
|
return _enhancers;
|
||||||
},
|
},
|
||||||
devTools: {
|
devTools: {
|
||||||
actionSanitizer,
|
actionSanitizer,
|
||||||
@@ -228,62 +203,9 @@ export const createStore = (options?: { persist?: boolean; persistThrottle?: num
|
|||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
// Once-off listener to support waiting for rehydration before rendering the app
|
|
||||||
startAppListening({
|
|
||||||
actionCreator: createAction(REMEMBER_REHYDRATED),
|
|
||||||
effect: (action, { unsubscribe }) => {
|
|
||||||
unsubscribe();
|
|
||||||
options?.onRehydrated?.();
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
return store;
|
|
||||||
};
|
|
||||||
|
|
||||||
export type AppStore = ReturnType<typeof createStore>;
|
export type AppStore = ReturnType<typeof createStore>;
|
||||||
export type RootState = ReturnType<AppStore['getState']>;
|
export type RootState = ReturnType<AppStore['getState']>;
|
||||||
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||||
export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>;
|
export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>;
|
||||||
export type AppDispatch = ReturnType<typeof createStore>['dispatch'];
|
export type AppDispatch = ReturnType<typeof createStore>['dispatch'];
|
||||||
export type AppGetState = ReturnType<typeof createStore>['getState'];
|
export type AppGetState = ReturnType<typeof createStore>['getState'];
|
||||||
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
|
||||||
|
|
||||||
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
|
|
||||||
|
|
||||||
const startAppListening = listenerMiddleware.startListening as AppStartListening;
|
|
||||||
addImageUploadedFulfilledListener(startAppListening);
|
|
||||||
|
|
||||||
// Image deleted
|
|
||||||
addDeleteBoardAndImagesFulfilledListener(startAppListening);
|
|
||||||
|
|
||||||
// User Invoked
|
|
||||||
addAnyEnqueuedListener(startAppListening);
|
|
||||||
addBatchEnqueuedListener(startAppListening);
|
|
||||||
|
|
||||||
// Socket.IO
|
|
||||||
addSocketConnectedEventListener(startAppListening);
|
|
||||||
|
|
||||||
// Gallery bulk download
|
|
||||||
addBulkDownloadListeners(startAppListening);
|
|
||||||
|
|
||||||
// Boards
|
|
||||||
addImageAddedToBoardFulfilledListener(startAppListening);
|
|
||||||
addImageRemovedFromBoardFulfilledListener(startAppListening);
|
|
||||||
addBoardIdSelectedListener(startAppListening);
|
|
||||||
addArchivedOrDeletedBoardListener(startAppListening);
|
|
||||||
|
|
||||||
// Node schemas
|
|
||||||
addGetOpenAPISchemaListener(startAppListening);
|
|
||||||
|
|
||||||
// Models
|
|
||||||
addModelSelectedListener(startAppListening);
|
|
||||||
|
|
||||||
// app startup
|
|
||||||
addAppStartedListener(startAppListening);
|
|
||||||
addModelsLoadedListener(startAppListening);
|
|
||||||
addAppConfigReceivedListener(startAppListening);
|
|
||||||
|
|
||||||
// Ad-hoc upscale workflwo
|
|
||||||
addAdHocPostProcessingRequestedListener(startAppListening);
|
|
||||||
|
|
||||||
addSetDefaultSettingsListener(startAppListening);
|
|
||||||
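As an aside, a rough sketch of the persistence round trip implemented by the serialize/unserialize pair above, using a hypothetical 'params' slice whose persistDenylist contains '_scratch' (the names and values are assumptions, not taken from this diff):

// Persist: unwrap the redux-undo history if present, drop denylisted keys, stringify.
//   serialize({ past: [], present: { prompt: 'cat', _scratch: 1 }, future: [] }, 'params')
//   -> '{"prompt":"cat"}'
// Rehydrate: parse, strip unknown keys, run migrations, then backfill missing keys from the
// slice's initial state; the SLICE_CONFIGS-based version also re-wraps undoable slices via
// newHistory([], state, []).
//   unserialize('{"prompt":"cat"}', 'params')
//   -> { prompt: 'cat', steps: 30 }   // steps filled in from the initial state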
|
|||||||
@@ -1,46 +0,0 @@
|
|||||||
import type { Slice } from '@reduxjs/toolkit';
|
|
||||||
import type { UndoableOptions } from 'redux-undo';
|
|
||||||
import type { ZodType } from 'zod';
|
|
||||||
|
|
||||||
type StateFromSlice<T extends Slice> = T extends Slice<infer U> ? U : never;
|
|
||||||
|
|
||||||
export type SliceConfig<T extends Slice> = {
|
|
||||||
/**
|
|
||||||
* The redux slice (return of createSlice).
|
|
||||||
*/
|
|
||||||
slice: T;
|
|
||||||
/**
|
|
||||||
* The zod schema for the slice.
|
|
||||||
*/
|
|
||||||
schema: ZodType<StateFromSlice<T>>;
|
|
||||||
/**
|
|
||||||
* A function that returns the initial state of the slice.
|
|
||||||
*/
|
|
||||||
getInitialState: () => StateFromSlice<T>;
|
|
||||||
/**
|
|
||||||
* The optional persist configuration for this slice. If omitted, the slice will not be persisted.
|
|
||||||
*/
|
|
||||||
persistConfig?: {
|
|
||||||
/**
|
|
||||||
* Migrate the state to the current version during rehydration. This method should throw an error if the migration
|
|
||||||
* fails.
|
|
||||||
*
|
|
||||||
* @param state The rehydrated state.
|
|
||||||
* @returns A correctly-shaped state.
|
|
||||||
*/
|
|
||||||
migrate: (state: unknown) => StateFromSlice<T>;
|
|
||||||
/**
|
|
||||||
* Keys to omit from the persisted state.
|
|
||||||
*/
|
|
||||||
persistDenylist?: (keyof StateFromSlice<T>)[];
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* The optional undoable configuration for this slice. If omitted, the slice will not be undoable.
|
|
||||||
*/
|
|
||||||
undoableConfig?: {
|
|
||||||
/**
|
|
||||||
* The options to be passed into redux-undo.
|
|
||||||
*/
|
|
||||||
reduxUndoOptions: UndoableOptions<StateFromSlice<T>>;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
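For reference, a minimal sketch of what a slice config matching the SliceConfig type in the preceding hunk looks like, using a hypothetical fooSlice (the slice, schema, and values are illustrative assumptions):

import { createSlice } from '@reduxjs/toolkit';
import z from 'zod';

const zFooState = z.object({ value: z.number() });
type FooState = z.infer<typeof zFooState>;

const fooSlice = createSlice({
  name: 'foo',
  initialState: { value: 0 } as FooState,
  reducers: {},
});

// Matches the SliceConfig shape: slice, schema, getInitialState, optional persistConfig/undoableConfig.
export const fooSliceConfig = {
  slice: fooSlice,
  schema: zFooState,
  getInitialState: (): FooState => ({ value: 0 }),
  persistConfig: {
    migrate: (state: unknown): FooState => zFooState.parse(state),
    persistDenylist: [],
  },
};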
@@ -1,299 +1,130 @@
|
|||||||
import { zFilterType } from 'features/controlLayers/store/filters';
|
import type { FilterType } from 'features/controlLayers/store/filters';
|
||||||
import { zParameterPrecision, zParameterScheduler } from 'features/parameters/types/parameterSchemas';
|
import type { ParameterPrecision, ParameterScheduler } from 'features/parameters/types/parameterSchemas';
|
||||||
import { zTabName } from 'features/ui/store/uiTypes';
|
import type { TabName } from 'features/ui/store/uiTypes';
|
||||||
import type { PartialDeep } from 'type-fest';
|
import type { PartialDeep } from 'type-fest';
|
||||||
import z from 'zod';
|
|
||||||
|
|
||||||
const zAppFeature = z.enum([
|
/**
|
||||||
'faceRestore',
|
* A disable-able application feature
|
||||||
'upscaling',
|
*/
|
||||||
'lightbox',
|
export type AppFeature =
|
||||||
'modelManager',
|
| 'faceRestore'
|
||||||
'githubLink',
|
| 'upscaling'
|
||||||
'discordLink',
|
| 'lightbox'
|
||||||
'bugLink',
|
| 'modelManager'
|
||||||
'aboutModal',
|
| 'githubLink'
|
||||||
'localization',
|
| 'discordLink'
|
||||||
'consoleLogging',
|
| 'bugLink'
|
||||||
'dynamicPrompting',
|
| 'aboutModal'
|
||||||
'batches',
|
| 'localization'
|
||||||
'syncModels',
|
| 'consoleLogging'
|
||||||
'multiselect',
|
| 'dynamicPrompting'
|
||||||
'pauseQueue',
|
| 'batches'
|
||||||
'resumeQueue',
|
| 'syncModels'
|
||||||
'invocationCache',
|
| 'multiselect'
|
||||||
'modelCache',
|
| 'pauseQueue'
|
||||||
'bulkDownload',
|
| 'resumeQueue'
|
||||||
'starterModels',
|
| 'invocationCache'
|
||||||
'hfToken',
|
| 'modelCache'
|
||||||
'retryQueueItem',
|
| 'bulkDownload'
|
||||||
'cancelAndClearAll',
|
| 'starterModels'
|
||||||
'chatGPT4oHigh',
|
| 'hfToken'
|
||||||
'modelRelationships',
|
| 'retryQueueItem'
|
||||||
]);
|
| 'cancelAndClearAll'
|
||||||
export type AppFeature = z.infer<typeof zAppFeature>;
|
| 'chatGPT4oHigh'
|
||||||
|
| 'modelRelationships';
|
||||||
|
/**
|
||||||
|
* A disable-able Stable Diffusion feature
|
||||||
|
*/
|
||||||
|
export type SDFeature =
|
||||||
|
| 'controlNet'
|
||||||
|
| 'noise'
|
||||||
|
| 'perlinNoise'
|
||||||
|
| 'noiseThreshold'
|
||||||
|
| 'variation'
|
||||||
|
| 'symmetry'
|
||||||
|
| 'seamless'
|
||||||
|
| 'hires'
|
||||||
|
| 'lora'
|
||||||
|
| 'embedding'
|
||||||
|
| 'vae'
|
||||||
|
| 'hrf';
|
||||||
|
|
||||||
const zSDFeature = z.enum([
|
export type NumericalParameterConfig = {
|
||||||
'controlNet',
|
initial: number;
|
||||||
'noise',
|
sliderMin: number;
|
||||||
'perlinNoise',
|
sliderMax: number;
|
||||||
'noiseThreshold',
|
numberInputMin: number;
|
||||||
'variation',
|
numberInputMax: number;
|
||||||
'symmetry',
|
fineStep: number;
|
||||||
'seamless',
|
coarseStep: number;
|
||||||
'hires',
|
};
|
||||||
'lora',
|
|
||||||
'embedding',
|
|
||||||
'vae',
|
|
||||||
'hrf',
|
|
||||||
]);
|
|
||||||
export type SDFeature = z.infer<typeof zSDFeature>;
|
|
||||||
|
|
||||||
const zNumericalParameterConfig = z.object({
|
|
||||||
initial: z.number().default(512),
|
|
||||||
sliderMin: z.number().default(64),
|
|
||||||
sliderMax: z.number().default(1536),
|
|
||||||
numberInputMin: z.number().default(64),
|
|
||||||
numberInputMax: z.number().default(4096),
|
|
||||||
fineStep: z.number().default(8),
|
|
||||||
coarseStep: z.number().default(64),
|
|
||||||
});
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Configuration options for the InvokeAI UI.
|
* Configuration options for the InvokeAI UI.
|
||||||
* Distinct from system settings which may be changed inside the app.
|
* Distinct from system settings which may be changed inside the app.
|
||||||
*/
|
*/
|
||||||
export const zAppConfig = z.object({
|
export type AppConfig = {
|
||||||
/**
|
/**
|
||||||
* Whether or not we should update image URLs when image loading errors occur
|
* Whether or not we should update image URLs when image loading errors occur
|
||||||
*/
|
*/
|
||||||
shouldUpdateImagesOnConnect: z.boolean(),
|
shouldUpdateImagesOnConnect: boolean;
|
||||||
shouldFetchMetadataFromApi: z.boolean(),
|
shouldFetchMetadataFromApi: boolean;
|
||||||
/**
|
/**
|
||||||
* Sets a size limit for outputs on the upscaling tab. This is a maximum dimension, so the actual max number of pixels
|
* Sets a size limit for outputs on the upscaling tab. This is a maximum dimension, so the actual max number of pixels
|
||||||
* will be the square of this value.
|
* will be the square of this value.
|
||||||
*/
|
*/
|
||||||
maxUpscaleDimension: z.number().optional(),
|
maxUpscaleDimension?: number;
|
||||||
allowPrivateBoards: z.boolean(),
|
allowPrivateBoards: boolean;
|
||||||
allowPrivateStylePresets: z.boolean(),
|
allowPrivateStylePresets: boolean;
|
||||||
allowClientSideUpload: z.boolean(),
|
allowClientSideUpload: boolean;
|
||||||
allowPublishWorkflows: z.boolean(),
|
allowPublishWorkflows: boolean;
|
||||||
allowPromptExpansion: z.boolean(),
|
allowPromptExpansion: boolean;
|
||||||
disabledTabs: z.array(zTabName),
|
disabledTabs: TabName[];
|
||||||
disabledFeatures: z.array(zAppFeature),
|
disabledFeatures: AppFeature[];
|
||||||
disabledSDFeatures: z.array(zSDFeature),
|
disabledSDFeatures: SDFeature[];
|
||||||
nodesAllowlist: z.array(z.string()).optional(),
|
nodesAllowlist: string[] | undefined;
|
||||||
nodesDenylist: z.array(z.string()).optional(),
|
nodesDenylist: string[] | undefined;
|
||||||
metadataFetchDebounce: z.number().int().optional(),
|
metadataFetchDebounce?: number;
|
||||||
workflowFetchDebounce: z.number().int().optional(),
|
workflowFetchDebounce?: number;
|
||||||
isLocal: z.boolean().optional(),
|
-  shouldShowCredits: z.boolean().optional(),
+  isLocal?: boolean;
+  shouldShowCredits: boolean;
-  sd: z.object({
-    defaultModel: z.string().optional(),
-    disabledControlNetModels: z.array(z.string()),
-    disabledControlNetProcessors: z.array(zFilterType),
-    // Core parameters
-    iterations: zNumericalParameterConfig,
-    width: zNumericalParameterConfig,
-    height: zNumericalParameterConfig,
-    steps: zNumericalParameterConfig,
-    guidance: zNumericalParameterConfig,
-    cfgRescaleMultiplier: zNumericalParameterConfig,
-    img2imgStrength: zNumericalParameterConfig,
-    scheduler: zParameterScheduler.optional(),
-    vaePrecision: zParameterPrecision.optional(),
-    // Canvas
-    boundingBoxHeight: zNumericalParameterConfig,
-    boundingBoxWidth: zNumericalParameterConfig,
-    scaledBoundingBoxHeight: zNumericalParameterConfig,
-    scaledBoundingBoxWidth: zNumericalParameterConfig,
-    canvasCoherenceStrength: zNumericalParameterConfig,
-    canvasCoherenceEdgeSize: zNumericalParameterConfig,
-    infillTileSize: zNumericalParameterConfig,
-    infillPatchmatchDownscaleSize: zNumericalParameterConfig,
-    // Misc advanced
-    clipSkip: zNumericalParameterConfig, // slider and input max are ignored for this, because the values depend on the model
-    maskBlur: zNumericalParameterConfig,
-    hrfStrength: zNumericalParameterConfig,
-    dynamicPrompts: z.object({
-      maxPrompts: zNumericalParameterConfig,
-    }),
-    ca: z.object({
-      weight: zNumericalParameterConfig,
-    }),
-  }),
-  flux: z.object({
-    guidance: zNumericalParameterConfig,
-  }),
-});
-
-export type AppConfig = z.infer<typeof zAppConfig>;
-export type PartialAppConfig = PartialDeep<AppConfig>;
-
-export const getDefaultAppConfig = (): AppConfig => ({
-  isLocal: true,
-  shouldUpdateImagesOnConnect: false,
-  shouldFetchMetadataFromApi: false,
-  allowPrivateBoards: false,
-  allowPrivateStylePresets: false,
-  allowClientSideUpload: false,
-  allowPublishWorkflows: false,
-  allowPromptExpansion: false,
-  shouldShowCredits: false,
-  disabledTabs: [],
-  disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[],
-  disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[],
-  sd: {
-    disabledControlNetModels: [],
-    disabledControlNetProcessors: [],
-    iterations: {
-      initial: 1,
-      sliderMin: 1,
-      sliderMax: 1000,
-      numberInputMin: 1,
-      numberInputMax: 10000,
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    width: zNumericalParameterConfig.parse({}), // initial value comes from model
-    height: zNumericalParameterConfig.parse({}), // initial value comes from model
-    boundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
-    boundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
-    scaledBoundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
-    scaledBoundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
-    scheduler: 'dpmpp_3m_k' as const,
-    vaePrecision: 'fp32' as const,
-    steps: {
-      initial: 30,
-      sliderMin: 1,
-      sliderMax: 100,
-      numberInputMin: 1,
-      numberInputMax: 500,
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    guidance: {
-      initial: 7,
-      sliderMin: 1,
-      sliderMax: 20,
-      numberInputMin: 1,
-      numberInputMax: 200,
-      fineStep: 0.1,
-      coarseStep: 0.5,
-    },
-    img2imgStrength: {
-      initial: 0.7,
-      sliderMin: 0,
-      sliderMax: 1,
-      numberInputMin: 0,
-      numberInputMax: 1,
-      fineStep: 0.01,
-      coarseStep: 0.05,
-    },
-    canvasCoherenceStrength: {
-      initial: 0.3,
-      sliderMin: 0,
-      sliderMax: 1,
-      numberInputMin: 0,
-      numberInputMax: 1,
-      fineStep: 0.01,
-      coarseStep: 0.05,
-    },
-    hrfStrength: {
-      initial: 0.45,
-      sliderMin: 0,
-      sliderMax: 1,
-      numberInputMin: 0,
-      numberInputMax: 1,
-      fineStep: 0.01,
-      coarseStep: 0.05,
-    },
-    canvasCoherenceEdgeSize: {
-      initial: 16,
-      sliderMin: 0,
-      sliderMax: 128,
-      numberInputMin: 0,
-      numberInputMax: 1024,
-      fineStep: 8,
-      coarseStep: 16,
-    },
-    cfgRescaleMultiplier: {
-      initial: 0,
-      sliderMin: 0,
-      sliderMax: 0.99,
-      numberInputMin: 0,
-      numberInputMax: 0.99,
-      fineStep: 0.05,
-      coarseStep: 0.1,
-    },
-    clipSkip: {
-      initial: 0,
-      sliderMin: 0,
-      sliderMax: 12, // determined by model selection, unused in practice
-      numberInputMin: 0,
-      numberInputMax: 12, // determined by model selection, unused in practice
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    infillPatchmatchDownscaleSize: {
-      initial: 1,
-      sliderMin: 1,
-      sliderMax: 10,
-      numberInputMin: 1,
-      numberInputMax: 10,
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    infillTileSize: {
-      initial: 32,
-      sliderMin: 16,
-      sliderMax: 64,
-      numberInputMin: 16,
-      numberInputMax: 256,
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    maskBlur: {
-      initial: 16,
-      sliderMin: 0,
-      sliderMax: 128,
-      numberInputMin: 0,
-      numberInputMax: 512,
-      fineStep: 1,
-      coarseStep: 1,
-    },
-    ca: {
-      weight: {
-        initial: 1,
-        sliderMin: 0,
-        sliderMax: 2,
-        numberInputMin: -1,
-        numberInputMax: 2,
-        fineStep: 0.01,
-        coarseStep: 0.05,
-      },
-    },
-    dynamicPrompts: {
-      maxPrompts: {
-        initial: 100,
-        sliderMin: 1,
-        sliderMax: 1000,
-        numberInputMin: 1,
-        numberInputMax: 10000,
-        fineStep: 1,
-        coarseStep: 10,
-      },
-    },
-  },
-  flux: {
-    guidance: {
-      initial: 4,
-      sliderMin: 2,
-      sliderMax: 6,
-      numberInputMin: 1,
-      numberInputMax: 20,
-      fineStep: 0.1,
-      coarseStep: 0.5,
-    },
-  },
-});
+  sd: {
+    defaultModel?: string;
+    disabledControlNetModels: string[];
+    disabledControlNetProcessors: FilterType[];
+    // Core parameters
+    iterations: NumericalParameterConfig;
+    width: NumericalParameterConfig; // initial value comes from model
+    height: NumericalParameterConfig; // initial value comes from model
+    steps: NumericalParameterConfig;
+    guidance: NumericalParameterConfig;
+    cfgRescaleMultiplier: NumericalParameterConfig;
+    img2imgStrength: NumericalParameterConfig;
+    scheduler?: ParameterScheduler;
+    vaePrecision?: ParameterPrecision;
+    // Canvas
+    boundingBoxHeight: NumericalParameterConfig; // initial value comes from model
+    boundingBoxWidth: NumericalParameterConfig; // initial value comes from model
+    scaledBoundingBoxHeight: NumericalParameterConfig; // initial value comes from model
+    scaledBoundingBoxWidth: NumericalParameterConfig; // initial value comes from model
+    canvasCoherenceStrength: NumericalParameterConfig;
+    canvasCoherenceEdgeSize: NumericalParameterConfig;
+    infillTileSize: NumericalParameterConfig;
+    infillPatchmatchDownscaleSize: NumericalParameterConfig;
+    // Misc advanced
+    clipSkip: NumericalParameterConfig; // slider and input max are ignored for this, because the values depend on the model
+    maskBlur: NumericalParameterConfig;
+    hrfStrength: NumericalParameterConfig;
+    dynamicPrompts: {
+      maxPrompts: NumericalParameterConfig;
+    };
+    ca: {
+      weight: NumericalParameterConfig;
+    };
+  };
+  flux: {
+    guidance: NumericalParameterConfig;
+  };
+};
+
+export type PartialAppConfig = PartialDeep<AppConfig>;

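Note (illustrative, not part of the diff): the removed side validates the app config at runtime with zod and derives the type from the schema, while the added side declares compile-time types only. A minimal sketch of the zod pattern, with hypothetical schema names:

// Illustrative only: defaults live in the schema, so an empty override parses to them
// and a bad override fails loudly at runtime. Names are hypothetical, not the project's.
import { z } from 'zod';

const zExampleConfig = z.object({
  isLocal: z.boolean().default(true),
  sd: z
    .object({
      steps: z.number().int().min(1).default(30),
    })
    .default({}),
});

type ExampleConfig = z.infer<typeof zExampleConfig>;

const defaults: ExampleConfig = zExampleConfig.parse({}); // { isLocal: true, sd: { steps: 30 } }
const merged: ExampleConfig = zExampleConfig.parse({ sd: { steps: 20 } });
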
@@ -67,8 +67,6 @@ export type Feature =
   | 'scale'
   | 'creativity'
   | 'structure'
-  | 'tileSize'
-  | 'tileOverlap'
   | 'optimizedDenoising'
   | 'fluxDevLicense';

@@ -11,13 +11,9 @@ import {
   Text,
 } from '@invoke-ai/ui-library';
 import { useStore } from '@nanostores/react';
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
 import { typedMemo } from 'common/util/typedMemo';
 import { NO_DRAG_CLASS, NO_WHEEL_CLASS } from 'features/nodes/types/constants';
-import { selectPickerCompactViewStates } from 'features/ui/store/uiSelectors';
-import { pickerCompactViewStateChanged } from 'features/ui/store/uiSlice';
 import type { AnyStore, ReadableAtom, Task, WritableAtom } from 'nanostores';
 import { atom, computed } from 'nanostores';
 import type { StoreValues } from 'nanostores/computed';
@@ -144,10 +140,6 @@ const NoMatchesFallbackWrapper = typedMemo(({ children }: PropsWithChildren) =>
 NoMatchesFallbackWrapper.displayName = 'NoMatchesFallbackWrapper';

 type PickerProps<T extends object> = {
-  /**
-   * Unique identifier for this picker instance. Used to persist compact view state.
-   */
-  pickerId?: string;
   /**
    * The options to display in the picker. This can be a flat array of options or an array of groups.
    */
@@ -212,18 +204,10 @@ type PickerProps<T extends object> = {
   initialGroupStates?: GroupStatusMap;
 };

-const buildSelectIsCompactView = (pickerId?: string) =>
-  createSelector([selectPickerCompactViewStates], (compactViewStates) => {
-    if (!pickerId) {
-      return true;
-    }
-    return compactViewStates[pickerId] ?? true;
-  });
-
 export type PickerContextState<T extends object> = {
   $optionsOrGroups: WritableAtom<OptionOrGroup<T>[]>;
   $groupStatusMap: WritableAtom<GroupStatusMap>;
-  isCompactView: boolean;
+  $compactView: WritableAtom<boolean>;
   $activeOptionId: WritableAtom<string | undefined>;
   $filteredOptions: WritableAtom<OptionOrGroup<T>[]>;
   $flattenedFilteredOptions: ReadableAtom<T[]>;
@@ -249,7 +233,6 @@ export type PickerContextState<T extends object> = {
   OptionComponent: React.ComponentType<{ option: T } & BoxProps>;
   NextToSearchBar?: React.ReactNode;
   searchable?: boolean;
-  pickerId?: string;
 };

 /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
@@ -520,7 +503,6 @@ const countOptions = <T extends object>(optionsOrGroups: OptionOrGroup<T>[]) =>

 export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
   const {
-    pickerId,
     getOptionId,
     optionsOrGroups,
     handleRef,
@@ -539,12 +521,12 @@ export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
   } = props;
   const rootRef = useRef<HTMLDivElement>(null);
   const inputRef = useRef<HTMLInputElement>(null);

   const { $groupStatusMap, $areAllGroupsDisabled, toggleGroup } = useTogglableGroups(
     optionsOrGroups,
     initialGroupStates
   );
   const $activeOptionId = useAtom(getFirstOptionId(optionsOrGroups, getOptionId));
+  const $compactView = useAtom(true);
   const $optionsOrGroups = useAtom(optionsOrGroups);
   const $totalOptionCount = useComputed([$optionsOrGroups], countOptions);
   const $filteredOptions = useAtom<OptionOrGroup<T>[]>([]);
@@ -556,9 +538,6 @@ export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
   const $searchTerm = useAtom('');
   const $selectedItemId = useComputed([$selectedItem], (item) => (item ? getOptionId(item) : undefined));

-  const selectIsCompactView = useMemo(() => buildSelectIsCompactView(pickerId), [pickerId]);
-  const isCompactView = useAppSelector(selectIsCompactView);
-
   const onSelectById = useCallback(
     (id: string) => {
       const options = $filteredOptions.get();
@@ -586,7 +565,7 @@ export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
     ({
       $optionsOrGroups,
       $groupStatusMap,
-      isCompactView,
+      $compactView,
       $activeOptionId,
       $filteredOptions,
       $flattenedFilteredOptions,
@@ -612,12 +591,11 @@ export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
       $hasOptions,
       $hasFilteredOptions,
       $filteredOptionsCount,
-      pickerId,
     }) satisfies PickerContextState<T>,
     [
       $optionsOrGroups,
       $groupStatusMap,
-      isCompactView,
+      $compactView,
       $activeOptionId,
       $filteredOptions,
       $flattenedFilteredOptions,
@@ -641,7 +619,6 @@ export const Picker = typedMemo(<T extends object>(props: PickerProps<T>) => {
       $hasOptions,
       $hasFilteredOptions,
       $filteredOptionsCount,
-      pickerId,
     ]
   );

@@ -892,17 +869,15 @@ GroupToggleButtons.displayName = 'GroupToggleButtons';

 const CompactViewToggleButton = typedMemo(<T extends object>() => {
   const { t } = useTranslation();
-  const dispatch = useAppDispatch();
-  const { isCompactView, pickerId } = usePickerContext<T>();
+  const { $compactView } = usePickerContext<T>();
+  const compactView = useStore($compactView);

   const onClick = useCallback(() => {
-    if (pickerId) {
-      dispatch(pickerCompactViewStateChanged({ pickerId, isCompact: !isCompactView }));
-    }
-  }, [dispatch, pickerId, isCompactView]);
+    $compactView.set(!$compactView.get());
+  }, [$compactView]);

-  const label = isCompactView ? t('common.fullView') : t('common.compactView');
-  const icon = isCompactView ? <PiArrowsOutLineVerticalBold /> : <PiArrowsInLineVerticalBold />;
+  const label = compactView ? t('common.fullView') : t('common.compactView');
+  const icon = compactView ? <PiArrowsOutLineVerticalBold /> : <PiArrowsInLineVerticalBold />;

   return <IconButton aria-label={label} tooltip={label} size="sm" variant="ghost" icon={icon} onClick={onClick} />;
 });
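Note (illustrative, not part of the diff): the added code swaps a Redux-persisted compact-view flag for a plain nanostores atom that is read with useStore and toggled with get/set. A minimal sketch of that pattern, using a module-level atom and hypothetical names rather than the component-local atom above:

// Illustrative nanostores read/toggle pattern.
import { atom } from 'nanostores';
import { useStore } from '@nanostores/react';
import { useCallback } from 'react';

const $compact = atom(true);

const ToggleExample = () => {
  const compact = useStore($compact); // subscribes; re-renders when the atom changes
  const toggle = useCallback(() => {
    $compact.set(!$compact.get());
  }, []);
  return <button onClick={toggle}>{compact ? 'Full view' : 'Compact view'}</button>;
};
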
@@ -949,7 +924,8 @@ const listSx = {
 } satisfies SystemStyleObject;

 const PickerList = typedMemo(<T extends object>() => {
-  const { getOptionId, isCompactView, $filteredOptions } = usePickerContext<T>();
+  const { getOptionId, $compactView, $filteredOptions } = usePickerContext<T>();
+  const compactView = useStore($compactView);
   const filteredOptions = useStore($filteredOptions);

   if (filteredOptions.length === 0) {
@@ -958,10 +934,10 @@ const PickerList = typedMemo(<T extends object>() => {

   return (
     <ScrollableContent>
-      <Flex sx={listSx} data-is-compact={isCompactView}>
+      <Flex sx={listSx} data-is-compact={compactView}>
         {filteredOptions.map((optionOrGroup, i) => {
           if (isGroup(optionOrGroup)) {
-            const withDivider = !isCompactView && i < filteredOptions.length - 1;
+            const withDivider = !compactView && i < filteredOptions.length - 1;
             return (
               <React.Fragment key={optionOrGroup.id}>
                 <PickerGroup group={optionOrGroup} />
@@ -1103,13 +1079,14 @@ const groupHeaderSx = {

 const PickerGroupHeader = typedMemo(<T extends object>({ group }: { group: Group<T> }) => {
   const { t } = useTranslation();
-  const { isCompactView } = usePickerContext<T>();
+  const { $compactView } = usePickerContext<T>();
+  const compactView = useStore($compactView);
   const color = getGroupColor(group);
   const name = getGroupName(group);
   const count = getGroupCount(group, t);

   return (
-    <Flex sx={groupHeaderSx} data-is-compact={isCompactView}>
+    <Flex sx={groupHeaderSx} data-is-compact={compactView}>
       <Flex gap={2} alignItems="center">
         <Text fontSize="sm" fontWeight="semibold" color={color} noOfLines={1}>
           {name}

@@ -1,8 +1,6 @@
 import { MenuItem } from '@invoke-ai/ui-library';
 import { useAppDispatch } from 'app/store/storeHooks';
-import { canvasReset } from 'features/controlLayers/store/actions';
-import { inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';
-import { $canvasManager } from 'features/controlLayers/store/ephemeral';
+import { allEntitiesDeleted } from 'features/controlLayers/store/canvasSlice';
 import { paramsReset } from 'features/controlLayers/store/paramsSlice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
@@ -13,9 +11,7 @@ export const SessionMenuItems = memo(() => {
   const dispatch = useAppDispatch();

   const resetCanvasLayers = useCallback(() => {
-    dispatch(canvasReset());
-    dispatch(inpaintMaskAdded({ isSelected: true, isBookmarked: true }));
-    $canvasManager.get()?.stage.fitBboxToStage();
+    dispatch(allEntitiesDeleted());
   }, [dispatch]);
   const resetGenerationSettings = useCallback(() => {
     dispatch(paramsReset());

@@ -6,6 +6,7 @@ import { atom, computed } from 'nanostores';
 import type { RefObject } from 'react';
 import { useEffect } from 'react';
 import { objectKeys } from 'tsafe';
+import z from 'zod/v4';

 /**
  * We need to manage focus regions to conditionally enable hotkeys:
@@ -27,7 +28,10 @@ import { objectKeys } from 'tsafe';

 const log = logger('system');

-const REGION_NAMES = [
+/**
+ * The names of the focus regions.
+ */
+const zFocusRegionName = z.enum([
   'launchpad',
   'viewer',
   'gallery',
@@ -37,16 +41,13 @@ const REGION_NAMES = [
   'workflows',
   'progress',
   'settings',
-] as const;
-/**
- * The names of the focus regions.
- */
-export type FocusRegionName = (typeof REGION_NAMES)[number];
+]);
+export type FocusRegionName = z.infer<typeof zFocusRegionName>;

 /**
  * A map of focus regions to the elements that are part of that region.
  */
-const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = REGION_NAMES.reduce(
+const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = zFocusRegionName.options.values().reduce(
   (acc, region) => {
     acc[region] = new Set<HTMLElement>();
     return acc;

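Note (illustrative, not part of the diff): a zod enum yields both the runtime list of names and a derived union type from a single declaration, and `.options` is already a plain array, so it can be reduced directly. A minimal sketch with hypothetical names:

// Illustrative: one z.enum drives both the type and the runtime lookup table.
import { z } from 'zod';

const zRegion = z.enum(['gallery', 'canvas', 'settings']);
type Region = z.infer<typeof zRegion>; // 'gallery' | 'canvas' | 'settings'

const targets = zRegion.options.reduce(
  (acc, region) => {
    acc[region] = new Set<HTMLElement>();
    return acc;
  },
  {} as Record<Region, Set<HTMLElement>>
);
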
11 invokeai/frontend/web/src/common/hooks/useClearStorage.ts Normal file
@@ -0,0 +1,11 @@
+import { clearIdbKeyValStore } from 'app/store/enhancers/reduxRemember/driver';
+import { useCallback } from 'react';
+
+export const useClearStorage = () => {
+  const clearStorage = useCallback(() => {
+    clearIdbKeyValStore();
+    localStorage.clear();
+  }, []);
+
+  return clearStorage;
+};
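Note (illustrative, not part of the diff): a sketch of how the added hook might be consumed; the component, the import alias, and the reload step are assumptions rather than code from this branch:

// Illustrative usage of the added useClearStorage hook.
import { useCallback } from 'react';
import { useClearStorage } from 'common/hooks/useClearStorage';

const ResetWebUIButton = () => {
  const clearStorage = useClearStorage();
  const onClick = useCallback(() => {
    clearStorage();
    // A full reload is the usual follow-up so the app reboots from empty storage.
    window.location.reload();
  }, [clearStorage]);
  return <button onClick={onClick}>Clear storage and reload</button>;
};
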
@@ -139,13 +139,4 @@ export const useGlobalHotkeys = () => {
     },
     dependencies: [getState, deleteImageModalApi],
   });
-
-  useRegisteredHotkeys({
-    id: 'toggleViewer',
-    category: 'viewer',
-    callback: () => {
-      navigationApi.toggleViewerPanel();
-    },
-    dependencies: [],
-  });
 };

@@ -6,7 +6,7 @@ import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
 import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
 import { toast } from 'features/toast/toast';
 import { memo, useCallback } from 'react';
-import type { Accept, FileRejection } from 'react-dropzone';
+import type { FileRejection } from 'react-dropzone';
 import { useDropzone } from 'react-dropzone';
 import { useTranslation } from 'react-i18next';
 import { PiUploadBold } from 'react-icons/pi';
@@ -15,18 +15,6 @@ import type { ImageDTO } from 'services/api/types';
 import { assert } from 'tsafe';
 import type { SetOptional } from 'type-fest';

-const addUpperCaseReducer = (acc: string[], ext: string) => {
-  acc.push(ext);
-  acc.push(ext.toUpperCase());
-  return acc;
-};
-
-export const dropzoneAccept: Accept = {
-  'image/png': ['.png'].reduce(addUpperCaseReducer, [] as string[]),
-  'image/jpeg': ['.jpg', '.jpeg', '.png'].reduce(addUpperCaseReducer, [] as string[]),
-  'image/webp': ['.webp'].reduce(addUpperCaseReducer, [] as string[]),
-};
-
 import { useClientSideUpload } from './useClientSideUpload';
 type UseImageUploadButtonArgs =
   | {
@@ -176,7 +164,11 @@ export const useImageUploadButton = ({
     getInputProps: getUploadInputProps,
     open: openUploader,
   } = useDropzone({
-    accept: dropzoneAccept,
+    accept: {
+      'image/png': ['.png'],
+      'image/jpeg': ['.jpg', '.jpeg', '.png'],
+      'image/webp': ['.webp'],
+    },
     onDropAccepted,
     onDropRejected,
     disabled: isDisabled,

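Note (illustrative, not part of the diff): the inline `accept` object follows react-dropzone's MIME-type-to-extensions shape. A minimal sketch with assumed handler wiring:

// Illustrative react-dropzone accept configuration.
import { useDropzone } from 'react-dropzone';

const useImageDropzone = (onFiles: (files: File[]) => void) => {
  return useDropzone({
    accept: {
      'image/png': ['.png'],
      'image/jpeg': ['.jpg', '.jpeg'],
      'image/webp': ['.webp'],
    },
    multiple: true,
    onDropAccepted: onFiles,
  });
};
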
@@ -1,5 +1,3 @@
|
|||||||
import type { MouseEvent } from 'react';
|
export const preventDefault = (e: React.MouseEvent) => {
|
||||||
|
|
||||||
export const preventDefault = (e: MouseEvent) => {
|
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
};
|
};
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff.