mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-15 07:28:06 -05:00
Compare commits
299 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a8bdd779b4 | ||
|
|
f92b4d4f35 | ||
|
|
3ca00e3948 | ||
|
|
61114ce854 | ||
|
|
f3554b4e1b | ||
|
|
9dcb025241 | ||
|
|
ecf646066a | ||
|
|
3fd10b68cd | ||
|
|
6e32c7993c | ||
|
|
8329533848 | ||
|
|
fc7157b029 | ||
|
|
a1897f7490 | ||
|
|
a89b3efd14 | ||
|
|
5259693ed1 | ||
|
|
d77c24206d | ||
|
|
c5069557f3 | ||
|
|
9b220f61bd | ||
|
|
7fc3af12cc | ||
|
|
e2721b46b6 | ||
|
|
17118a04bd | ||
|
|
24788e3c83 | ||
|
|
056387c981 | ||
|
|
8a43d90273 | ||
|
|
4f9b9760db | ||
|
|
fdaddafa56 | ||
|
|
23d59abbd7 | ||
|
|
cf7fa5bce8 | ||
|
|
39e41998bb | ||
|
|
c6eff71b74 | ||
|
|
6ea4c47757 | ||
|
|
91f91aa835 | ||
|
|
ea7868d076 | ||
|
|
7d86f00d82 | ||
|
|
7785061e7d | ||
|
|
3370052e54 | ||
|
|
325dacd29c | ||
|
|
f4981a6ba9 | ||
|
|
8c159942eb | ||
|
|
deb4dc64af | ||
|
|
1a11437b6f | ||
|
|
04572c94ad | ||
|
|
1e9e78089e | ||
|
|
e65f93663d | ||
|
|
2a796fe25e | ||
|
|
61ff9ee3a7 | ||
|
|
111408c046 | ||
|
|
d7619d465e | ||
|
|
8ad4f6e56d | ||
|
|
bf4899526f | ||
|
|
6435d265c6 | ||
|
|
3163ef454d | ||
|
|
7ea636df70 | ||
|
|
1869824803 | ||
|
|
66fc8af8a6 | ||
|
|
48cb6b12f0 | ||
|
|
68e30a9864 | ||
|
|
f65dc2c081 | ||
|
|
0cd77443a7 | ||
|
|
185ed86424 | ||
|
|
fed817ab83 | ||
|
|
e0b45db69a | ||
|
|
2beac1fb04 | ||
|
|
e522de33f8 | ||
|
|
d591b50c25 | ||
|
|
b365aad6d8 | ||
|
|
65ad392361 | ||
|
|
56d75e1c77 | ||
|
|
df77a12efe | ||
|
|
faf662d12e | ||
|
|
44a7dfd486 | ||
|
|
bb15e5cf06 | ||
|
|
1a1c846be3 | ||
|
|
93c896a370 | ||
|
|
053d7c8c8e | ||
|
|
5296263954 | ||
|
|
a36b70c01c | ||
|
|
854a2a5a7a | ||
|
|
f9c64b0609 | ||
|
|
5889fa536a | ||
|
|
0e71ba892f | ||
|
|
d766a21223 | ||
|
|
5c8c54eab8 | ||
|
|
f296f4525c | ||
|
|
7c9ba4cb52 | ||
|
|
6784fd5b43 | ||
|
|
11d68cc646 | ||
|
|
ea8c877025 | ||
|
|
7a3c2332dd | ||
|
|
3835fd2f72 | ||
|
|
6f8746040c | ||
|
|
35e3940a09 | ||
|
|
415616d83f | ||
|
|
afb67efef9 | ||
|
|
1ed1fefa60 | ||
|
|
fa94a05c77 | ||
|
|
7a23d8266f | ||
|
|
a44de079dd | ||
|
|
c3c1a3edd8 | ||
|
|
ea26b5b147 | ||
|
|
4226b741b1 | ||
|
|
1424b7c254 | ||
|
|
933fb2294c | ||
|
|
5a181ee0fd | ||
|
|
3b0d59e459 | ||
|
|
fec296e41d | ||
|
|
ae4e38c6d0 | ||
|
|
a9f3f1a4b2 | ||
|
|
8a73df4fe1 | ||
|
|
ea2e1ea8f0 | ||
|
|
e8aa91931d | ||
|
|
8d22a314a6 | ||
|
|
57ce2b8aa7 | ||
|
|
6b810cb3fb | ||
|
|
4f3a5dcc43 | ||
|
|
c3ae14cf73 | ||
|
|
b9c44b92d5 | ||
|
|
5a68b4ddbc | ||
|
|
18a722839b | ||
|
|
7370cb9be6 | ||
|
|
cc4df52f82 | ||
|
|
1cb4ef05a4 | ||
|
|
7da141101c | ||
|
|
2571e199c5 | ||
|
|
79e93f905e | ||
|
|
f562e4f835 | ||
|
|
47e220aaf3 | ||
|
|
9365154bfe | ||
|
|
afc6911c96 | ||
|
|
afa1ee7ffd | ||
|
|
5a102f6b53 | ||
|
|
af345a33f3 | ||
|
|
038b110a82 | ||
|
|
f3cd49d46e | ||
|
|
ca7d7c9d93 | ||
|
|
1addeb4b59 | ||
|
|
6ea4884b0c | ||
|
|
aed9b1013e | ||
|
|
6962536b4a | ||
|
|
7e59d040aa | ||
|
|
e7c67da2c2 | ||
|
|
c44571bc36 | ||
|
|
ca257650d4 | ||
|
|
6a9962d2bb | ||
|
|
9492569a2c | ||
|
|
61e711620d | ||
|
|
3cf82505bb | ||
|
|
53bcbc58f5 | ||
|
|
42f3990f7a | ||
|
|
456205da17 | ||
|
|
ca0684700e | ||
|
|
6a702821ef | ||
|
|
682d271f6f | ||
|
|
e872c253b1 | ||
|
|
28633c9983 | ||
|
|
70ac58e64a | ||
|
|
e653837236 | ||
|
|
2bbfcc2f13 | ||
|
|
d6e0e439c5 | ||
|
|
26aab60f81 | ||
|
|
7bea2fa11f | ||
|
|
169d58ea4c | ||
|
|
b53d2250f7 | ||
|
|
242eea8295 | ||
|
|
4dabe09e0d | ||
|
|
07fa0d3b77 | ||
|
|
e97f82292f | ||
|
|
005bab9035 | ||
|
|
409173919c | ||
|
|
7915180047 | ||
|
|
4349b8387d | ||
|
|
f95b686bdc | ||
|
|
72afb9c3fd | ||
|
|
f004fc31f1 | ||
|
|
2aa163b3a2 | ||
|
|
f40900c173 | ||
|
|
2c1f2b2873 | ||
|
|
8418e34480 | ||
|
|
b548ac0ccf | ||
|
|
2af2b8b6c4 | ||
|
|
058dc06748 | ||
|
|
8acb1c0088 | ||
|
|
683732a37c | ||
|
|
b990eacca0 | ||
|
|
5f7e920deb | ||
|
|
55dfdc0a9c | ||
|
|
10d6d19e17 | ||
|
|
15542b954d | ||
|
|
6430d830c1 | ||
|
|
c3f6389291 | ||
|
|
070eef3eff | ||
|
|
b14d841d57 | ||
|
|
dd35ab026a | ||
|
|
7fc06db8ad | ||
|
|
9d1f09c0f3 | ||
|
|
cacfb183a6 | ||
|
|
564f4f7a60 | ||
|
|
113a118fcf | ||
|
|
1f930cdaf2 | ||
|
|
c490e0ce08 | ||
|
|
7640ee307c | ||
|
|
1f5f70f898 | ||
|
|
1430858112 | ||
|
|
48c27ec117 | ||
|
|
af7737e804 | ||
|
|
3eca0d2ba0 | ||
|
|
307259f096 | ||
|
|
bed01941a5 | ||
|
|
89fa43a3b6 | ||
|
|
d8fcb08abf | ||
|
|
c61bcd9f50 | ||
|
|
3fb0fcbbfb | ||
|
|
db9af5083f | ||
|
|
720f1bb65c | ||
|
|
7dfb318ba2 | ||
|
|
9b024da2b4 | ||
|
|
15ca3b727a | ||
|
|
74ca604ae0 | ||
|
|
6934b05c85 | ||
|
|
1a47a5317c | ||
|
|
bc3ef21c64 | ||
|
|
e329f5ad43 | ||
|
|
e6ad91bf89 | ||
|
|
2f586416a5 | ||
|
|
33b56f421c | ||
|
|
e58ee4c492 | ||
|
|
49691aa07e | ||
|
|
56570f235f | ||
|
|
a2d95cf5b6 | ||
|
|
704dbfd04a | ||
|
|
5d9e078043 | ||
|
|
875cde13ae | ||
|
|
77655aed86 | ||
|
|
0628b92d63 | ||
|
|
9e526d00c2 | ||
|
|
1a24396be8 | ||
|
|
d97e73a565 | ||
|
|
55b14c8aaf | ||
|
|
1cdd4b5980 | ||
|
|
79f65e57eb | ||
|
|
b4c8950278 | ||
|
|
400b2e9a55 | ||
|
|
3a687c583a | ||
|
|
833950078d | ||
|
|
e698dcb148 | ||
|
|
218386e077 | ||
|
|
4426be9e64 | ||
|
|
89ceecc870 | ||
|
|
86f4cf7857 | ||
|
|
49ae66d94a | ||
|
|
c10865c7ef | ||
|
|
687cccdb99 | ||
|
|
f3478a189a | ||
|
|
c84f8465b8 | ||
|
|
43db29176a | ||
|
|
f38922929c | ||
|
|
7d02c58f86 | ||
|
|
6edce8be87 | ||
|
|
31f63e38bd | ||
|
|
78a68ac3a7 | ||
|
|
8cd3bcd1c0 | ||
|
|
264cc5ef46 | ||
|
|
4b5c481b7a | ||
|
|
8bfbea5ed3 | ||
|
|
f06a66da07 | ||
|
|
337cae9b22 | ||
|
|
bf926bb7d5 | ||
|
|
18ad9a6af3 | ||
|
|
b6ed31c222 | ||
|
|
200beb5af5 | ||
|
|
f82a948bdd | ||
|
|
dd03e3ddcd | ||
|
|
7561b73e8f | ||
|
|
caa97608c7 | ||
|
|
72a6d1edc1 | ||
|
|
b8bf89c2f1 | ||
|
|
a1ade2b8c0 | ||
|
|
4bdcae1f8f | ||
|
|
4b22c84407 | ||
|
|
c9daf1db30 | ||
|
|
06d3cfbe97 | ||
|
|
71e4901313 | ||
|
|
2caa1b166d | ||
|
|
1b6ebede7b | ||
|
|
017d38eee2 | ||
|
|
78eb6b0338 | ||
|
|
3e8e0f6ddf | ||
|
|
8213f62d3b | ||
|
|
233740a40e | ||
|
|
8c5fcfd0fd | ||
|
|
6d7b231196 | ||
|
|
31ca314b02 | ||
|
|
0db304f1ee | ||
|
|
a3cb3e03f4 | ||
|
|
641a6cfdb7 | ||
|
|
f27471cea7 | ||
|
|
47508b8d6c | ||
|
|
28e0242907 | ||
|
|
96523ca01f | ||
|
|
c10a6fdab1 |
26
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
26
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@@ -21,6 +21,20 @@ body:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: install_method
|
||||
attributes:
|
||||
label: Install method
|
||||
description: How did you install Invoke?
|
||||
multiple: false
|
||||
options:
|
||||
- "Invoke's Launcher"
|
||||
- 'Stability Matrix'
|
||||
- 'Pinokio'
|
||||
- 'Manual'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: __Describe your environment__
|
||||
@@ -76,8 +90,8 @@ body:
|
||||
attributes:
|
||||
label: Version number
|
||||
description: |
|
||||
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||
placeholder: ex. 3.6.1
|
||||
The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||
placeholder: ex. v6.0.2
|
||||
validations:
|
||||
required: true
|
||||
|
||||
@@ -85,17 +99,17 @@ body:
|
||||
id: browser-version
|
||||
attributes:
|
||||
label: Browser
|
||||
description: Your web browser and version.
|
||||
description: Your web browser and version, if you do not use the Launcher's provided GUI.
|
||||
placeholder: ex. Firefox 123.0b3
|
||||
validations:
|
||||
required: true
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
id: python-deps
|
||||
attributes:
|
||||
label: Python dependencies
|
||||
label: System Information
|
||||
description: |
|
||||
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
|
||||
Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
|
||||
validations:
|
||||
required: false
|
||||
|
||||
|
||||
8
.github/workflows/build-container.yml
vendored
8
.github/workflows/build-container.yml
vendored
@@ -45,6 +45,9 @@ jobs:
|
||||
steps:
|
||||
- name: Free up more disk space on the runner
|
||||
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
||||
# the /mnt dir has 70GBs of free space
|
||||
# /dev/sda1 74G 28K 70G 1% /mnt
|
||||
# According to some online posts the /mnt is not always there, so checking before setting docker to use it
|
||||
run: |
|
||||
echo "----- Free space before cleanup"
|
||||
df -h
|
||||
@@ -52,6 +55,11 @@ jobs:
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo swapoff /mnt/swapfile
|
||||
sudo rm -rf /mnt/swapfile
|
||||
if [ -d /mnt ]; then
|
||||
sudo chmod -R 777 /mnt
|
||||
echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
|
||||
sudo systemctl restart docker
|
||||
fi
|
||||
echo "----- Free space after cleanup"
|
||||
df -h
|
||||
|
||||
|
||||
12
.github/workflows/typegen-checks.yml
vendored
12
.github/workflows/typegen-checks.yml
vendored
@@ -39,6 +39,18 @@ jobs:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Free up more disk space on the runner
|
||||
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
||||
run: |
|
||||
echo "----- Free space before cleanup"
|
||||
df -h
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo swapoff /mnt/swapfile
|
||||
sudo rm -rf /mnt/swapfile
|
||||
echo "----- Free space after cleanup"
|
||||
df -h
|
||||
|
||||
- name: check for changed files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -190,3 +190,5 @@ installer/update.bat
|
||||
installer/update.sh
|
||||
installer/InvokeAI-Installer/
|
||||
.aider*
|
||||
|
||||
.claude/
|
||||
|
||||
@@ -22,6 +22,10 @@
|
||||
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
|
||||
# GPU_DRIVER=cuda #| rocm
|
||||
|
||||
## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
|
||||
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
|
||||
# RENDER_GROUP_ID=
|
||||
|
||||
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
|
||||
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
|
||||
# CONTAINER_UID=1000
|
||||
|
||||
@@ -5,8 +5,7 @@
|
||||
FROM docker.io/node:22-slim AS web-builder
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
ENV PATH="$PNPM_HOME:$PATH"
|
||||
RUN corepack use pnpm@8.x
|
||||
RUN corepack enable
|
||||
RUN corepack use pnpm@10.x && corepack enable
|
||||
|
||||
WORKDIR /build
|
||||
COPY invokeai/frontend/web/ ./
|
||||
@@ -44,7 +43,6 @@ ENV \
|
||||
UV_MANAGED_PYTHON=1 \
|
||||
UV_LINK_MODE=copy \
|
||||
UV_PROJECT_ENVIRONMENT=/opt/venv \
|
||||
UV_INDEX="https://download.pytorch.org/whl/cu124" \
|
||||
INVOKEAI_ROOT=/invokeai \
|
||||
INVOKEAI_HOST=0.0.0.0 \
|
||||
INVOKEAI_PORT=9090 \
|
||||
@@ -75,19 +73,17 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
|
||||
--mount=type=bind,source=invokeai/version,target=invokeai/version \
|
||||
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
|
||||
elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
|
||||
fi && \
|
||||
uv sync --frozen
|
||||
|
||||
# build patchmatch
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python -c "from patchmatch import patch_match"
|
||||
ulimit -n 30000 && \
|
||||
uv sync --extra $GPU_DRIVER --frozen
|
||||
|
||||
# Link amdgpu.ids for ROCm builds
|
||||
# contributed by https://github.com/Rubonnek
|
||||
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
|
||||
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" && groupadd render
|
||||
|
||||
# build patchmatch
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python -c "from patchmatch import patch_match"
|
||||
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
|
||||
|
||||
@@ -106,8 +102,6 @@ COPY invokeai ${INVOKEAI_SRC}/invokeai
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
|
||||
elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
|
||||
fi && \
|
||||
uv pip install -e .
|
||||
ulimit -n 30000 && \
|
||||
uv pip install -e .[$GPU_DRIVER]
|
||||
|
||||
|
||||
136
docker/Dockerfile-rocm-full
Normal file
136
docker/Dockerfile-rocm-full
Normal file
@@ -0,0 +1,136 @@
|
||||
# syntax=docker/dockerfile:1.4
|
||||
|
||||
#### Web UI ------------------------------------
|
||||
|
||||
FROM docker.io/node:22-slim AS web-builder
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
ENV PATH="$PNPM_HOME:$PATH"
|
||||
RUN corepack use pnpm@8.x
|
||||
RUN corepack enable
|
||||
|
||||
WORKDIR /build
|
||||
COPY invokeai/frontend/web/ ./
|
||||
RUN --mount=type=cache,target=/pnpm/store \
|
||||
pnpm install --frozen-lockfile
|
||||
RUN npx vite build
|
||||
|
||||
## Backend ---------------------------------------
|
||||
|
||||
FROM library/ubuntu:24.04
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
--mount=type=cache,target=/var/lib/apt \
|
||||
apt update && apt install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
git \
|
||||
gosu \
|
||||
libglib2.0-0 \
|
||||
libgl1 \
|
||||
libglx-mesa0 \
|
||||
build-essential \
|
||||
libopencv-dev \
|
||||
libstdc++-10-dev \
|
||||
wget
|
||||
|
||||
ENV \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
VIRTUAL_ENV=/opt/venv \
|
||||
INVOKEAI_SRC=/opt/invokeai \
|
||||
PYTHON_VERSION=3.12 \
|
||||
UV_PYTHON=3.12 \
|
||||
UV_COMPILE_BYTECODE=1 \
|
||||
UV_MANAGED_PYTHON=1 \
|
||||
UV_LINK_MODE=copy \
|
||||
UV_PROJECT_ENVIRONMENT=/opt/venv \
|
||||
INVOKEAI_ROOT=/invokeai \
|
||||
INVOKEAI_HOST=0.0.0.0 \
|
||||
INVOKEAI_PORT=9090 \
|
||||
PATH="/opt/venv/bin:$PATH" \
|
||||
CONTAINER_UID=${CONTAINER_UID:-1000} \
|
||||
CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
|
||||
ARG GPU_DRIVER=cuda
|
||||
|
||||
# Install `uv` for package management
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
|
||||
|
||||
# Install python & allow non-root user to use it by traversing the /root dir without read permissions
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv python install ${PYTHON_VERSION} && \
|
||||
# chmod --recursive a+rX /root/.local/share/uv/python
|
||||
chmod 711 /root
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
|
||||
# bind-mount instead of copy to defer adding sources to the image until next layer.
|
||||
#
|
||||
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||
# x86_64/CUDA is the default
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
|
||||
--mount=type=bind,source=invokeai/version,target=invokeai/version \
|
||||
ulimit -n 30000 && \
|
||||
uv sync --extra $GPU_DRIVER --frozen
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
--mount=type=cache,target=/var/lib/apt \
|
||||
if [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||
wget -O /tmp/amdgpu-install.deb \
|
||||
https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
|
||||
apt install -y /tmp/amdgpu-install.deb && \
|
||||
apt update && \
|
||||
amdgpu-install --usecase=rocm -y && \
|
||||
apt-get autoclean && \
|
||||
apt clean && \
|
||||
rm -rf /tmp/* /var/tmp/* && \
|
||||
usermod -a -G render ubuntu && \
|
||||
usermod -a -G video ubuntu && \
|
||||
echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
|
||||
ldconfig && \
|
||||
update-alternatives --auto rocm; \
|
||||
fi
|
||||
|
||||
## Heathen711: Leaving this for review input, will remove before merge
|
||||
# RUN --mount=type=cache,target=/var/cache/apt \
|
||||
# --mount=type=cache,target=/var/lib/apt \
|
||||
# if [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||
# groupadd render && \
|
||||
# usermod -a -G render ubuntu && \
|
||||
# usermod -a -G video ubuntu; \
|
||||
# fi
|
||||
|
||||
## Link amdgpu.ids for ROCm builds
|
||||
## contributed by https://github.com/Rubonnek
|
||||
# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
|
||||
# ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
|
||||
# build patchmatch
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python -c "from patchmatch import patch_match"
|
||||
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
|
||||
|
||||
COPY docker/docker-entrypoint.sh ./
|
||||
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||
CMD ["invokeai-web"]
|
||||
|
||||
# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
|
||||
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
|
||||
|
||||
# add sources last to minimize image changes on code changes
|
||||
COPY invokeai ${INVOKEAI_SRC}/invokeai
|
||||
|
||||
# this should not increase image size because we've already installed dependencies
|
||||
# in a previous layer
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
ulimit -n 30000 && \
|
||||
uv pip install -e .[$GPU_DRIVER]
|
||||
|
||||
@@ -47,8 +47,9 @@ services:
|
||||
|
||||
invokeai-rocm:
|
||||
<<: *invokeai
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
environment:
|
||||
- AMD_VISIBLE_DEVICES=all
|
||||
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
|
||||
runtime: amd
|
||||
profiles:
|
||||
- rocm
|
||||
|
||||
@@ -21,6 +21,17 @@ _=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
|
||||
# ensure the UID is correct
|
||||
usermod -u ${USER_ID} ${USER} 1>/dev/null
|
||||
|
||||
## ROCM specific configuration
|
||||
# render group within the container must match the host render group
|
||||
# otherwise the container will not be able to access the host GPU.
|
||||
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
|
||||
# ensure the render group exists
|
||||
groupmod -g ${RENDER_GROUP_ID} render
|
||||
usermod -a -G render ${USER}
|
||||
usermod -a -G video ${USER}
|
||||
fi
|
||||
|
||||
|
||||
### Set the $PUBLIC_KEY env var to enable SSH access.
|
||||
# We do not install openssh-server in the image by default to avoid bloat.
|
||||
# but it is useful to have the full SSH server e.g. on Runpod.
|
||||
|
||||
@@ -13,7 +13,7 @@ run() {
|
||||
|
||||
# parse .env file for build args
|
||||
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
|
||||
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
|
||||
profile="$(awk -F '=' '/GPU_DRIVER=/ {print $2}' .env)"
|
||||
|
||||
# default to 'cuda' profile
|
||||
[[ -z "$profile" ]] && profile="cuda"
|
||||
@@ -30,7 +30,7 @@ run() {
|
||||
|
||||
printf "%s\n" "starting service $service_name"
|
||||
docker compose --profile "$profile" up -d "$service_name"
|
||||
docker compose logs -f
|
||||
docker compose --profile "$profile" logs -f
|
||||
}
|
||||
|
||||
run
|
||||
|
||||
@@ -265,7 +265,7 @@ If the key is unrecognized, this call raises an
|
||||
|
||||
#### exists(key) -> AnyModelConfig
|
||||
|
||||
Returns True if a model with the given key exists in the databsae.
|
||||
Returns True if a model with the given key exists in the database.
|
||||
|
||||
#### search_by_path(path) -> AnyModelConfig
|
||||
|
||||
@@ -718,7 +718,7 @@ When downloading remote models is implemented, additional
|
||||
configuration information, such as list of trigger terms, will be
|
||||
retrieved from the HuggingFace and Civitai model repositories.
|
||||
|
||||
The probed values can be overriden by providing a dictionary in the
|
||||
The probed values can be overridden by providing a dictionary in the
|
||||
optional `config` argument passed to `import_model()`. You may provide
|
||||
overriding values for any of the model's configuration
|
||||
attributes. Here is an example of setting the
|
||||
@@ -841,7 +841,7 @@ variable.
|
||||
|
||||
#### installer.start(invoker)
|
||||
|
||||
The `start` method is called by the API intialization routines when
|
||||
The `start` method is called by the API initialization routines when
|
||||
the API starts up. Its effect is to call `sync_to_config()` to
|
||||
synchronize the model record store database with what's currently on
|
||||
disk.
|
||||
|
||||
@@ -16,7 +16,7 @@ We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contrib
|
||||
- @psychedelicious (Spencer Mabrito) - Web Team Leader
|
||||
- @joshistoast (Josh Corbett) - Web Development
|
||||
- @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
||||
- @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||
- @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
|
||||
- @sunija - Standalone version
|
||||
- @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
||||
- @ryanjdick (Ryan Dick) - Machine Learning & Training
|
||||
|
||||
@@ -41,7 +41,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
|
||||
With the modifications made, the install command should look something like this:
|
||||
|
||||
```sh
|
||||
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu126 --reinstall
|
||||
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
|
||||
```
|
||||
|
||||
6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
|
||||
@@ -50,11 +50,11 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
|
||||
|
||||
If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.
|
||||
|
||||
7. Install the frontend dev toolchain:
|
||||
7. Install the frontend dev toolchain, paying attention to versions:
|
||||
|
||||
- [`nodejs`](https://nodejs.org/) (v20+)
|
||||
- [`nodejs`](https://nodejs.org/) (tested on LTS, v22)
|
||||
|
||||
- [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
|
||||
- [`pnpm`](https://pnpm.io/installation) (tested on v10)
|
||||
|
||||
8. Do a production build of the frontend:
|
||||
|
||||
|
||||
@@ -297,7 +297,7 @@ Migration logic is in [migrations.ts].
|
||||
<!-- links -->
|
||||
|
||||
[pydantic]: https://github.com/pydantic/pydantic 'pydantic'
|
||||
[zod]: https://github.com/colinhacks/zod 'zod/v4'
|
||||
[zod]: https://github.com/colinhacks/zod 'zod'
|
||||
[openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types'
|
||||
[reactflow]: https://github.com/xyflow/xyflow 'reactflow'
|
||||
[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions
|
||||
|
||||
@@ -69,34 +69,34 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
|
||||
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
|
||||
|
||||
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
|
||||
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is acheived by using [UV's built in torch support.](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection)
|
||||
|
||||
=== "Invoke v5.12 and later"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
|
||||
- **In all other cases, do not use an index.**
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
=== "Invoke v5.10.0 to v5.11.0"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
|
||||
- **In all other cases, do not use an index.**
|
||||
|
||||
=== "Invoke v5.0.0 to v5.9.1"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
|
||||
- **In all other cases, do not use an index.**
|
||||
|
||||
=== "Invoke v4"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
|
||||
- **In all other cases, do not use an index.**
|
||||
|
||||
8. Install the `invokeai` package. Substitute the package specifier and version.
|
||||
@@ -105,10 +105,10 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
|
||||
```
|
||||
|
||||
If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
|
||||
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
|
||||
|
||||
```sh
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<VERSION> --force-reinstall
|
||||
```
|
||||
|
||||
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
|
||||
|
||||
@@ -41,7 +41,7 @@ Nodes have a "Use Cache" option in their footer. This allows for performance imp
|
||||
|
||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
||||
|
||||
### Noise
|
||||
### Create Latent Noise
|
||||
|
||||
An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ from invokeai.app.services.board_images.board_images_default import BoardImagesS
|
||||
from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
|
||||
from invokeai.app.services.boards.boards_default import BoardService
|
||||
from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
|
||||
from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import ClientStatePersistenceSqlite
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.download.download_default import DownloadQueueService
|
||||
from invokeai.app.services.events.events_fastapievents import FastAPIEventService
|
||||
@@ -151,6 +152,7 @@ class ApiDependencies:
|
||||
style_preset_records = SqliteStylePresetRecordsStorage(db=db)
|
||||
style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
|
||||
workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)
|
||||
client_state_persistence = ClientStatePersistenceSqlite(db=db)
|
||||
|
||||
services = InvocationServices(
|
||||
board_image_records=board_image_records,
|
||||
@@ -181,6 +183,7 @@ class ApiDependencies:
|
||||
style_preset_records=style_preset_records,
|
||||
style_preset_image_files=style_preset_image_files,
|
||||
workflow_thumbnails=workflow_thumbnails,
|
||||
client_state_persistence=client_state_persistence,
|
||||
)
|
||||
|
||||
ApiDependencies.invoker = Invoker(services)
|
||||
|
||||
58
invokeai/app/api/routers/client_state.py
Normal file
58
invokeai/app/api/routers/client_state.py
Normal file
@@ -0,0 +1,58 @@
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.backend.util.logging import logging
|
||||
|
||||
client_state_router = APIRouter(prefix="/v1/client_state", tags=["client_state"])
|
||||
|
||||
|
||||
@client_state_router.get(
|
||||
"/{queue_id}/get_by_key",
|
||||
operation_id="get_client_state_by_key",
|
||||
response_model=str | None,
|
||||
)
|
||||
async def get_client_state_by_key(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
key: str = Query(..., description="Key to get"),
|
||||
) -> str | None:
|
||||
"""Gets the client state"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.client_state_persistence.get_by_key(queue_id, key)
|
||||
except Exception as e:
|
||||
logging.error(f"Error getting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error setting client state")
|
||||
|
||||
|
||||
@client_state_router.post(
|
||||
"/{queue_id}/set_by_key",
|
||||
operation_id="set_client_state",
|
||||
response_model=str,
|
||||
)
|
||||
async def set_client_state(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
key: str = Query(..., description="Key to set"),
|
||||
value: str = Body(..., description="Stringified value to set"),
|
||||
) -> str:
|
||||
"""Sets the client state"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.client_state_persistence.set_by_key(queue_id, key, value)
|
||||
except Exception as e:
|
||||
logging.error(f"Error setting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error setting client state")
|
||||
|
||||
|
||||
@client_state_router.post(
|
||||
"/{queue_id}/delete",
|
||||
operation_id="delete_client_state",
|
||||
responses={204: {"description": "Client state deleted"}},
|
||||
)
|
||||
async def delete_client_state(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> None:
|
||||
"""Deletes the client state"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.client_state_persistence.delete(queue_id)
|
||||
except Exception as e:
|
||||
logging.error(f"Error deleting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error deleting client state")
|
||||
@@ -19,6 +19,7 @@ from invokeai.app.api.routers import (
|
||||
app_info,
|
||||
board_images,
|
||||
boards,
|
||||
client_state,
|
||||
download_queue,
|
||||
images,
|
||||
model_manager,
|
||||
@@ -131,6 +132,7 @@ app.include_router(app_info.app_router, prefix="/api")
|
||||
app.include_router(session_queue.session_queue_router, prefix="/api")
|
||||
app.include_router(workflows.workflows_router, prefix="/api")
|
||||
app.include_router(style_presets.style_presets_router, prefix="/api")
|
||||
app.include_router(client_state.client_state_router, prefix="/api")
|
||||
|
||||
app.openapi = get_openapi_func(app)
|
||||
|
||||
@@ -155,6 +157,12 @@ def overridden_redoc() -> HTMLResponse:
|
||||
|
||||
web_root_path = Path(list(web_dir.__path__)[0])
|
||||
|
||||
if app_config.unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
"The unsafe_disable_picklescan option is enabled. This disables malware scanning while installing and"
|
||||
"loading models, which may allow malicious code to be executed. Use at your own risk."
|
||||
)
|
||||
|
||||
try:
|
||||
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||
except RuntimeError:
|
||||
|
||||
@@ -36,9 +36,19 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
image: ImageField = InputField(description="The image to encode.")
|
||||
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
|
||||
|
||||
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoencoderKL) -> int:
|
||||
"""Estimate the working memory required by the invocation in bytes."""
|
||||
# Encode operations use approximately 50% of the memory required for decode operations
|
||||
h = image_tensor.shape[-2]
|
||||
w = image_tensor.shape[-1]
|
||||
element_size = next(vae.parameters()).element_size()
|
||||
scaling_constant = 1100 # 50% of decode scaling constant (2200)
|
||||
working_memory = h * w * element_size * scaling_constant
|
||||
return int(working_memory)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
|
||||
with vae_info as vae:
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, AutoencoderKL)
|
||||
|
||||
vae.disable_tiling()
|
||||
@@ -62,7 +72,12 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
assert isinstance(vae_info.model, AutoencoderKL)
|
||||
|
||||
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
|
||||
latents = self.vae_encode(
|
||||
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
|
||||
)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
name = context.tensors.save(tensor=latents)
|
||||
|
||||
@@ -63,7 +63,7 @@ from invokeai.backend.util.devices import TorchDevice
|
||||
title="FLUX Denoise",
|
||||
tags=["image", "flux"],
|
||||
category="image",
|
||||
version="4.0.0",
|
||||
version="4.1.0",
|
||||
)
|
||||
class FluxDenoiseInvocation(BaseInvocation):
|
||||
"""Run denoising process with a FLUX transformer model."""
|
||||
@@ -153,7 +153,7 @@ class FluxDenoiseInvocation(BaseInvocation):
|
||||
description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection
|
||||
)
|
||||
|
||||
kontext_conditioning: Optional[FluxKontextConditioningField] = InputField(
|
||||
kontext_conditioning: FluxKontextConditioningField | list[FluxKontextConditioningField] | None = InputField(
|
||||
default=None,
|
||||
description="FLUX Kontext conditioning (reference image).",
|
||||
input=Input.Connection,
|
||||
@@ -328,6 +328,21 @@ class FluxDenoiseInvocation(BaseInvocation):
|
||||
cfg_scale_end_step=self.cfg_scale_end_step,
|
||||
)
|
||||
|
||||
kontext_extension = None
|
||||
if self.kontext_conditioning:
|
||||
if not self.controlnet_vae:
|
||||
raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
|
||||
|
||||
kontext_extension = KontextExtension(
|
||||
context=context,
|
||||
kontext_conditioning=self.kontext_conditioning
|
||||
if isinstance(self.kontext_conditioning, list)
|
||||
else [self.kontext_conditioning],
|
||||
vae_field=self.controlnet_vae,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
dtype=inference_dtype,
|
||||
)
|
||||
|
||||
with ExitStack() as exit_stack:
|
||||
# Prepare ControlNet extensions.
|
||||
# Note: We do this before loading the transformer model to minimize peak memory (see implementation).
|
||||
@@ -385,19 +400,6 @@ class FluxDenoiseInvocation(BaseInvocation):
|
||||
dtype=inference_dtype,
|
||||
)
|
||||
|
||||
kontext_extension = None
|
||||
if self.kontext_conditioning is not None:
|
||||
if not self.controlnet_vae:
|
||||
raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
|
||||
|
||||
kontext_extension = KontextExtension(
|
||||
context=context,
|
||||
kontext_conditioning=self.kontext_conditioning,
|
||||
vae_field=self.controlnet_vae,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
dtype=inference_dtype,
|
||||
)
|
||||
|
||||
# Prepare Kontext conditioning if provided
|
||||
img_cond_seq = None
|
||||
img_cond_seq_ids = None
|
||||
|
||||
@@ -35,14 +35,24 @@ class FluxVaeEncodeInvocation(BaseInvocation):
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoEncoder) -> int:
|
||||
"""Estimate the working memory required by the invocation in bytes."""
|
||||
# Encode operations use approximately 50% of the memory required for decode operations
|
||||
h = image_tensor.shape[-2]
|
||||
w = image_tensor.shape[-1]
|
||||
element_size = next(vae.parameters()).element_size()
|
||||
scaling_constant = 1100 # 50% of decode scaling constant (2200)
|
||||
working_memory = h * w * element_size * scaling_constant
|
||||
return int(working_memory)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
|
||||
# TODO(ryand): Expose seed parameter at the invocation level.
|
||||
# TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
|
||||
# There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
|
||||
# should be used for VAE encode sampling.
|
||||
generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
|
||||
with vae_info as vae:
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, AutoEncoder)
|
||||
vae_dtype = next(iter(vae.parameters())).dtype
|
||||
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
|
||||
@@ -60,7 +70,10 @@ class FluxVaeEncodeInvocation(BaseInvocation):
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
context.util.signal_progress("Running VAE")
|
||||
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
|
||||
latents = self.vae_encode(
|
||||
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
|
||||
)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
name = context.tensors.save(tensor=latents)
|
||||
|
||||
@@ -1347,3 +1347,96 @@ class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoar
|
||||
|
||||
image_dto = context.images.save(image=target_image)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
|
||||
@invocation(
|
||||
"flux_kontext_image_prep",
|
||||
title="FLUX Kontext Image Prep",
|
||||
tags=["image", "concatenate", "flux", "kontext"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
)
|
||||
class FluxKontextConcatenateImagesInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Prepares an image or images for use with FLUX Kontext. The first/single image is resized to the nearest
|
||||
preferred Kontext resolution. All other images are concatenated horizontally, maintaining their aspect ratio."""
|
||||
|
||||
images: list[ImageField] = InputField(
|
||||
description="The images to concatenate",
|
||||
min_length=1,
|
||||
max_length=10,
|
||||
)
|
||||
|
||||
use_preferred_resolution: bool = InputField(
|
||||
default=True, description="Use FLUX preferred resolutions for the first image"
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS
|
||||
|
||||
# Step 1: Load all images
|
||||
pil_images = []
|
||||
for image_field in self.images:
|
||||
image = context.images.get_pil(image_field.image_name, mode="RGBA")
|
||||
pil_images.append(image)
|
||||
|
||||
# Step 2: Determine target resolution for the first image
|
||||
first_image = pil_images[0]
|
||||
width, height = first_image.size
|
||||
|
||||
if self.use_preferred_resolution:
|
||||
aspect_ratio = width / height
|
||||
|
||||
# Find the closest preferred resolution for the first image
|
||||
_, target_width, target_height = min(
|
||||
((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
|
||||
)
|
||||
|
||||
# Apply BFL's scaling formula
|
||||
scaled_height = 2 * int(target_height / 16)
|
||||
final_height = 8 * scaled_height # This will be consistent for all images
|
||||
scaled_width = 2 * int(target_width / 16)
|
||||
first_width = 8 * scaled_width
|
||||
else:
|
||||
# Use original dimensions of first image, ensuring divisibility by 16
|
||||
final_height = 16 * (height // 16)
|
||||
first_width = 16 * (width // 16)
|
||||
# Ensure minimum dimensions
|
||||
if final_height < 16:
|
||||
final_height = 16
|
||||
if first_width < 16:
|
||||
first_width = 16
|
||||
|
||||
# Step 3: Process and resize all images with consistent height
|
||||
processed_images = []
|
||||
total_width = 0
|
||||
|
||||
for i, image in enumerate(pil_images):
|
||||
if i == 0:
|
||||
# First image uses the calculated dimensions
|
||||
final_width = first_width
|
||||
else:
|
||||
# Subsequent images maintain aspect ratio with the same height
|
||||
img_aspect_ratio = image.width / image.height
|
||||
# Calculate width that maintains aspect ratio at the target height
|
||||
calculated_width = int(final_height * img_aspect_ratio)
|
||||
# Ensure width is divisible by 16 for proper VAE encoding
|
||||
final_width = 16 * (calculated_width // 16)
|
||||
# Ensure minimum width
|
||||
if final_width < 16:
|
||||
final_width = 16
|
||||
|
||||
# Resize image to calculated dimensions
|
||||
resized_image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
|
||||
processed_images.append(resized_image)
|
||||
total_width += final_width
|
||||
|
||||
# Step 4: Concatenate images horizontally
|
||||
concatenated_image = Image.new("RGB", (total_width, final_height))
|
||||
x_offset = 0
|
||||
for img in processed_images:
|
||||
concatenated_image.paste(img, (x_offset, 0))
|
||||
x_offset += img.width
|
||||
|
||||
# Save the concatenated image
|
||||
image_dto = context.images.save(image=concatenated_image)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
@@ -52,11 +52,48 @@ class ImageToLatentsInvocation(BaseInvocation):
|
||||
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
|
||||
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
|
||||
|
||||
def _estimate_working_memory(
|
||||
self, image_tensor: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
|
||||
) -> int:
|
||||
"""Estimate the working memory required by the invocation in bytes."""
|
||||
# Encode operations use approximately 50% of the memory required for decode operations
|
||||
element_size = 4 if self.fp32 else 2
|
||||
scaling_constant = 1100 # 50% of decode scaling constant (2200)
|
||||
|
||||
if use_tiling:
|
||||
tile_size = self.tile_size
|
||||
if tile_size == 0:
|
||||
tile_size = vae.tile_sample_min_size
|
||||
assert isinstance(tile_size, int)
|
||||
h = tile_size
|
||||
w = tile_size
|
||||
working_memory = h * w * element_size * scaling_constant
|
||||
|
||||
# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
|
||||
# and number of tiles. We could make this more precise in the future, but this should be good enough for
|
||||
# most use cases.
|
||||
working_memory = working_memory * 1.25
|
||||
else:
|
||||
h = image_tensor.shape[-2]
|
||||
w = image_tensor.shape[-1]
|
||||
working_memory = h * w * element_size * scaling_constant
|
||||
|
||||
if self.fp32:
|
||||
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
|
||||
working_memory += 250 * 2**20
|
||||
|
||||
return int(working_memory)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(
|
||||
vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor, tile_size: int = 0
|
||||
vae_info: LoadedModel,
|
||||
upcast: bool,
|
||||
tiled: bool,
|
||||
image_tensor: torch.Tensor,
|
||||
tile_size: int = 0,
|
||||
estimated_working_memory: int = 0,
|
||||
) -> torch.Tensor:
|
||||
with vae_info as vae:
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
|
||||
orig_dtype = vae.dtype
|
||||
if upcast:
|
||||
@@ -113,14 +150,23 @@ class ImageToLatentsInvocation(BaseInvocation):
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
|
||||
|
||||
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
||||
if image_tensor.dim() == 3:
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
use_tiling = self.tiled or context.config.get().force_tiled_decode
|
||||
estimated_working_memory = self._estimate_working_memory(image_tensor, use_tiling, vae_info.model)
|
||||
|
||||
context.util.signal_progress("Running VAE encoder")
|
||||
latents = self.vae_encode(
|
||||
vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
|
||||
vae_info=vae_info,
|
||||
upcast=self.fp32,
|
||||
tiled=self.tiled,
|
||||
image_tensor=image_tensor,
|
||||
tile_size=self.tile_size,
|
||||
estimated_working_memory=estimated_working_memory,
|
||||
)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
|
||||
@@ -430,6 +430,15 @@ class FluxConditioningOutput(BaseInvocationOutput):
|
||||
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
|
||||
|
||||
|
||||
@invocation_output("flux_conditioning_collection_output")
|
||||
class FluxConditioningCollectionOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a collection of conditioning tensors"""
|
||||
|
||||
collection: list[FluxConditioningField] = OutputField(
|
||||
description="The output conditioning tensors",
|
||||
)
|
||||
|
||||
|
||||
@invocation_output("sd3_conditioning_output")
|
||||
class SD3ConditioningOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a single SD3 conditioning tensor"""
|
||||
|
||||
@@ -32,9 +32,19 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
image: ImageField = InputField(description="The image to encode")
|
||||
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
|
||||
|
||||
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoencoderKL) -> int:
|
||||
"""Estimate the working memory required by the invocation in bytes."""
|
||||
# Encode operations use approximately 50% of the memory required for decode operations
|
||||
h = image_tensor.shape[-2]
|
||||
w = image_tensor.shape[-1]
|
||||
element_size = next(vae.parameters()).element_size()
|
||||
scaling_constant = 1100 # 50% of decode scaling constant (2200)
|
||||
working_memory = h * w * element_size * scaling_constant
|
||||
return int(working_memory)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
|
||||
with vae_info as vae:
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, AutoencoderKL)
|
||||
|
||||
vae.disable_tiling()
|
||||
@@ -58,7 +68,12 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
assert isinstance(vae_info.model, AutoencoderKL)
|
||||
|
||||
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
|
||||
latents = self.vae_encode(
|
||||
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
|
||||
)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
name = context.tensors.save(tensor=latents)
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class ClientStatePersistenceABC(ABC):
|
||||
"""
|
||||
Base class for client persistence implementations.
|
||||
This class defines the interface for persisting client data.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def set_by_key(self, queue_id: str, key: str, value: str) -> str:
|
||||
"""
|
||||
Set a key-value pair for the client.
|
||||
|
||||
Args:
|
||||
key (str): The key to set.
|
||||
value (str): The value to set for the key.
|
||||
|
||||
Returns:
|
||||
str: The value that was set.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_by_key(self, queue_id: str, key: str) -> str | None:
|
||||
"""
|
||||
Get the value for a specific key of the client.
|
||||
|
||||
Args:
|
||||
key (str): The key to retrieve the value for.
|
||||
|
||||
Returns:
|
||||
str | None: The value associated with the key, or None if the key does not exist.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete(self, queue_id: str) -> None:
|
||||
"""
|
||||
Delete all client state.
|
||||
"""
|
||||
pass
|
||||
@@ -0,0 +1,65 @@
|
||||
import json
|
||||
|
||||
from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
|
||||
|
||||
class ClientStatePersistenceSqlite(ClientStatePersistenceABC):
|
||||
"""
|
||||
Base class for client persistence implementations.
|
||||
This class defines the interface for persisting client data.
|
||||
"""
|
||||
|
||||
def __init__(self, db: SqliteDatabase) -> None:
|
||||
super().__init__()
|
||||
self._db = db
|
||||
self._default_row_id = 1
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self._invoker = invoker
|
||||
|
||||
def _get(self) -> dict[str, str] | None:
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT data FROM client_state
|
||||
WHERE id = {self._default_row_id}
|
||||
"""
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return json.loads(row[0])
|
||||
|
||||
def set_by_key(self, queue_id: str, key: str, value: str) -> str:
|
||||
state = self._get() or {}
|
||||
state.update({key: value})
|
||||
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
f"""
|
||||
INSERT INTO client_state (id, data)
|
||||
VALUES ({self._default_row_id}, ?)
|
||||
ON CONFLICT(id) DO UPDATE
|
||||
SET data = excluded.data;
|
||||
""",
|
||||
(json.dumps(state),),
|
||||
)
|
||||
|
||||
return value
|
||||
|
||||
def get_by_key(self, queue_id: str, key: str) -> str | None:
|
||||
state = self._get()
|
||||
if state is None:
|
||||
return None
|
||||
return state.get(key, None)
|
||||
|
||||
def delete(self, queue_id: str) -> None:
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
f"""
|
||||
DELETE FROM client_state
|
||||
WHERE id = {self._default_row_id}
|
||||
"""
|
||||
)
|
||||
@@ -107,6 +107,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
|
||||
remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
|
||||
scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
|
||||
unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
|
||||
"""
|
||||
|
||||
_root: Optional[Path] = PrivateAttr(default=None)
|
||||
@@ -196,6 +197,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
|
||||
remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
|
||||
scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
|
||||
unsafe_disable_picklescan: bool = Field(default=False, description="UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.")
|
||||
|
||||
# fmt: on
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
|
||||
from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
|
||||
from invokeai.app.services.boards.boards_base import BoardServiceABC
|
||||
from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
|
||||
from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.download import DownloadQueueServiceBase
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
@@ -73,6 +74,7 @@ class InvocationServices:
|
||||
style_preset_records: "StylePresetRecordsStorageBase",
|
||||
style_preset_image_files: "StylePresetImageFileStorageBase",
|
||||
workflow_thumbnails: "WorkflowThumbnailServiceBase",
|
||||
client_state_persistence: "ClientStatePersistenceABC",
|
||||
):
|
||||
self.board_images = board_images
|
||||
self.board_image_records = board_image_records
|
||||
@@ -102,3 +104,4 @@ class InvocationServices:
|
||||
self.style_preset_records = style_preset_records
|
||||
self.style_preset_image_files = style_preset_image_files
|
||||
self.workflow_thumbnails = workflow_thumbnails
|
||||
self.client_state_persistence = client_state_persistence
|
||||
|
||||
@@ -7,7 +7,7 @@ import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from queue import Empty, Queue
|
||||
from shutil import copyfile, copytree, move, rmtree
|
||||
from shutil import move, rmtree
|
||||
from tempfile import mkdtemp
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
|
||||
|
||||
@@ -51,6 +51,7 @@ from invokeai.backend.model_manager.metadata import (
|
||||
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
|
||||
from invokeai.backend.model_manager.search import ModelSearch
|
||||
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
|
||||
from invokeai.backend.model_manager.util.lora_metadata_extractor import apply_lora_metadata
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
from invokeai.backend.util.catch_sigint import catch_sigint
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
@@ -185,13 +186,15 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
info: AnyModelConfig = self._probe(Path(model_path), config) # type: ignore
|
||||
|
||||
if preferred_name := config.name:
|
||||
preferred_name = Path(preferred_name).with_suffix(model_path.suffix)
|
||||
if Path(model_path).is_file():
|
||||
# Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
|
||||
preferred_name = f"{preferred_name}{model_path.suffix}"
|
||||
|
||||
dest_path = (
|
||||
self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
|
||||
)
|
||||
try:
|
||||
new_path = self._copy_model(model_path, dest_path)
|
||||
new_path = self._move_model(model_path, dest_path)
|
||||
except FileExistsError as excp:
|
||||
raise DuplicateModelException(
|
||||
f"A model named {model_path.name} is already installed at {dest_path.as_posix()}"
|
||||
@@ -616,30 +619,17 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self.record_store.update_model(key, ModelRecordChanges(path=model.path))
|
||||
return model
|
||||
|
||||
def _copy_model(self, old_path: Path, new_path: Path) -> Path:
|
||||
if old_path == new_path:
|
||||
return old_path
|
||||
new_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if old_path.is_dir():
|
||||
copytree(old_path, new_path)
|
||||
else:
|
||||
copyfile(old_path, new_path)
|
||||
return new_path
|
||||
|
||||
def _move_model(self, old_path: Path, new_path: Path) -> Path:
|
||||
if old_path == new_path:
|
||||
return old_path
|
||||
|
||||
if new_path.exists():
|
||||
raise FileExistsError(f"Cannot move {old_path} to {new_path}: destination already exists")
|
||||
|
||||
new_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# if path already exists then we jigger the name to make it unique
|
||||
counter: int = 1
|
||||
while new_path.exists():
|
||||
path = new_path.with_stem(new_path.stem + f"_{counter:02d}")
|
||||
if not path.exists():
|
||||
new_path = path
|
||||
counter += 1
|
||||
move(old_path, new_path)
|
||||
|
||||
return new_path
|
||||
|
||||
def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):
|
||||
@@ -667,6 +657,10 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
|
||||
info = info or self._probe(model_path, config)
|
||||
|
||||
# Apply LoRA metadata if applicable
|
||||
model_images_path = self.app_config.models_path / "model_images"
|
||||
apply_lora_metadata(info, model_path.resolve(), model_images_path)
|
||||
|
||||
model_path = model_path.resolve()
|
||||
|
||||
# Models in the Invoke-managed models dir should use relative paths.
|
||||
|
||||
@@ -87,9 +87,21 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
def torch_load_file(checkpoint: Path) -> AnyModel:
|
||||
scan_result = scan_file_path(checkpoint)
|
||||
if scan_result.infected_files != 0:
|
||||
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
|
||||
if self._app_config.unsafe_disable_picklescan:
|
||||
self._logger.warning(
|
||||
f"Model at {checkpoint} is potentially infected by malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
|
||||
if scan_result.scan_err:
|
||||
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
|
||||
if self._app_config.unsafe_disable_picklescan:
|
||||
self._logger.warning(
|
||||
f"Error scanning model at {checkpoint} for malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
|
||||
|
||||
result = torch_load(checkpoint, map_location="cpu")
|
||||
return result
|
||||
|
||||
@@ -23,6 +23,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_21 import build_migration_21
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||
|
||||
|
||||
@@ -63,6 +64,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
||||
migrator.register_migration(build_migration_18())
|
||||
migrator.register_migration(build_migration_19(app_config=config))
|
||||
migrator.register_migration(build_migration_20())
|
||||
migrator.register_migration(build_migration_21())
|
||||
migrator.run_migrations()
|
||||
|
||||
return db
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
|
||||
class Migration21Callback:
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE client_state (
|
||||
id INTEGER PRIMARY KEY CHECK(id = 1),
|
||||
data TEXT NOT NULL, -- Frontend will handle the shape of this data
|
||||
updated_at DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP)
|
||||
);
|
||||
"""
|
||||
)
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TRIGGER tg_client_state_updated_at
|
||||
AFTER UPDATE ON client_state
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE client_state
|
||||
SET updated_at = CURRENT_TIMESTAMP
|
||||
WHERE id = OLD.id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def build_migration_21() -> Migration:
|
||||
"""Builds the migration object for migrating from version 20 to version 21. This includes:
|
||||
- Creating the `client_state` table.
|
||||
- Adding a trigger to update the `updated_at` field on updates.
|
||||
"""
|
||||
return Migration(
|
||||
from_version=20,
|
||||
to_version=21,
|
||||
callback=Migration21Callback(),
|
||||
)
|
||||
@@ -112,7 +112,7 @@ def denoise(
|
||||
)
|
||||
|
||||
# Slice prediction to only include the main image tokens
|
||||
if img_input_ids is not None:
|
||||
if img_cond_seq is not None:
|
||||
pred = pred[:, :original_seq_len]
|
||||
|
||||
step_cfg_scale = cfg_scale[step_index]
|
||||
@@ -125,9 +125,26 @@ def denoise(
|
||||
if neg_regional_prompting_extension is None:
|
||||
raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
|
||||
|
||||
# For negative prediction with Kontext, we need to include the reference images
|
||||
# to maintain consistency between positive and negative passes. Without this,
|
||||
# CFG would create artifacts as the attention mechanism would see different
|
||||
# spatial structures in each pass
|
||||
neg_img_input = img
|
||||
neg_img_input_ids = img_ids
|
||||
|
||||
# Add channel-wise conditioning for negative pass if present
|
||||
if img_cond is not None:
|
||||
neg_img_input = torch.cat((neg_img_input, img_cond), dim=-1)
|
||||
|
||||
# Add sequence-wise conditioning (Kontext) for negative pass
|
||||
# This ensures reference images are processed consistently
|
||||
if img_cond_seq is not None:
|
||||
neg_img_input = torch.cat((neg_img_input, img_cond_seq), dim=1)
|
||||
neg_img_input_ids = torch.cat((neg_img_input_ids, img_cond_seq_ids), dim=1)
|
||||
|
||||
neg_pred = model(
|
||||
img=img,
|
||||
img_ids=img_ids,
|
||||
img=neg_img_input,
|
||||
img_ids=neg_img_input_ids,
|
||||
txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
|
||||
txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
|
||||
y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
|
||||
@@ -140,6 +157,10 @@ def denoise(
|
||||
ip_adapter_extensions=neg_ip_adapter_extensions,
|
||||
regional_prompting_extension=neg_regional_prompting_extension,
|
||||
)
|
||||
|
||||
# Slice negative prediction to match main image tokens
|
||||
if img_cond_seq is not None:
|
||||
neg_pred = neg_pred[:, :original_seq_len]
|
||||
pred = neg_pred + step_cfg_scale * (pred - neg_pred)
|
||||
|
||||
preview_img = img - t_curr * pred
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
import einops
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torchvision.transforms as T
|
||||
from einops import repeat
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.invocations.fields import FluxKontextConditioningField
|
||||
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
|
||||
from invokeai.app.invocations.model import VAEField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
|
||||
from invokeai.backend.flux.sampling_utils import pack
|
||||
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
def generate_img_ids_with_offset(
|
||||
@@ -19,8 +18,10 @@ def generate_img_ids_with_offset(
|
||||
device: torch.device,
|
||||
dtype: torch.dtype,
|
||||
idx_offset: int = 0,
|
||||
h_offset: int = 0,
|
||||
w_offset: int = 0,
|
||||
) -> torch.Tensor:
|
||||
"""Generate tensor of image position ids with an optional offset.
|
||||
"""Generate tensor of image position ids with optional index and spatial offsets.
|
||||
|
||||
Args:
|
||||
latent_height (int): Height of image in latent space (after packing, this becomes h//2).
|
||||
@@ -28,7 +29,9 @@ def generate_img_ids_with_offset(
|
||||
batch_size (int): Number of images in the batch.
|
||||
device (torch.device): Device to create tensors on.
|
||||
dtype (torch.dtype): Data type for the tensors.
|
||||
idx_offset (int): Offset to add to the first dimension of the image ids.
|
||||
idx_offset (int): Offset to add to the first dimension of the image ids (default: 0).
|
||||
h_offset (int): Spatial offset for height/y-coordinates in latent space (default: 0).
|
||||
w_offset (int): Spatial offset for width/x-coordinates in latent space (default: 0).
|
||||
|
||||
Returns:
|
||||
torch.Tensor: Image position ids with shape [batch_size, (latent_height//2 * latent_width//2), 3].
|
||||
@@ -42,6 +45,10 @@ def generate_img_ids_with_offset(
|
||||
packed_height = latent_height // 2
|
||||
packed_width = latent_width // 2
|
||||
|
||||
# Convert spatial offsets from latent space to packed space
|
||||
packed_h_offset = h_offset // 2
|
||||
packed_w_offset = w_offset // 2
|
||||
|
||||
# Create base tensor for position IDs with shape [packed_height, packed_width, 3]
|
||||
# The 3 channels represent: [batch_offset, y_position, x_position]
|
||||
img_ids = torch.zeros(packed_height, packed_width, 3, device=device, dtype=dtype)
|
||||
@@ -49,13 +56,13 @@ def generate_img_ids_with_offset(
|
||||
# Set the batch offset for all positions
|
||||
img_ids[..., 0] = idx_offset
|
||||
|
||||
# Create y-coordinate indices (vertical positions)
|
||||
y_indices = torch.arange(packed_height, device=device, dtype=dtype)
|
||||
# Create y-coordinate indices (vertical positions) with spatial offset
|
||||
y_indices = torch.arange(packed_height, device=device, dtype=dtype) + packed_h_offset
|
||||
# Broadcast y_indices to match the spatial dimensions [packed_height, 1]
|
||||
img_ids[..., 1] = y_indices[:, None]
|
||||
|
||||
# Create x-coordinate indices (horizontal positions)
|
||||
x_indices = torch.arange(packed_width, device=device, dtype=dtype)
|
||||
# Create x-coordinate indices (horizontal positions) with spatial offset
|
||||
x_indices = torch.arange(packed_width, device=device, dtype=dtype) + packed_w_offset
|
||||
# Broadcast x_indices to match the spatial dimensions [1, packed_width]
|
||||
img_ids[..., 2] = x_indices[None, :]
|
||||
|
||||
@@ -73,14 +80,14 @@ class KontextExtension:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
kontext_conditioning: FluxKontextConditioningField,
|
||||
kontext_conditioning: list[FluxKontextConditioningField],
|
||||
context: InvocationContext,
|
||||
vae_field: VAEField,
|
||||
device: torch.device,
|
||||
dtype: torch.dtype,
|
||||
):
|
||||
"""
|
||||
Initializes the KontextExtension, pre-processing the reference image
|
||||
Initializes the KontextExtension, pre-processing the reference images
|
||||
into latents and positional IDs.
|
||||
"""
|
||||
self._context = context
|
||||
@@ -93,54 +100,116 @@ class KontextExtension:
|
||||
self.kontext_latents, self.kontext_ids = self._prepare_kontext()
|
||||
|
||||
def _prepare_kontext(self) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Encodes the reference image and prepares its latents and IDs."""
|
||||
image = self._context.images.get_pil(self.kontext_conditioning.image.image_name)
|
||||
"""Encodes the reference images and prepares their concatenated latents and IDs with spatial tiling."""
|
||||
all_latents = []
|
||||
all_ids = []
|
||||
|
||||
# Calculate aspect ratio of input image
|
||||
width, height = image.size
|
||||
aspect_ratio = width / height
|
||||
# Track cumulative dimensions for spatial tiling
|
||||
# These track the running extent of the virtual canvas in latent space
|
||||
canvas_h = 0 # Running canvas height
|
||||
canvas_w = 0 # Running canvas width
|
||||
|
||||
# Find the closest preferred resolution by aspect ratio
|
||||
_, target_width, target_height = min(
|
||||
((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
|
||||
)
|
||||
|
||||
# Apply BFL's scaling formula
|
||||
# This ensures compatibility with the model's training
|
||||
scaled_width = 2 * int(target_width / 16)
|
||||
scaled_height = 2 * int(target_height / 16)
|
||||
|
||||
# Resize to the exact resolution used during training
|
||||
image = image.convert("RGB")
|
||||
final_width = 8 * scaled_width
|
||||
final_height = 8 * scaled_height
|
||||
image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
|
||||
|
||||
# Convert to tensor with same normalization as BFL
|
||||
image_np = np.array(image)
|
||||
image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0
|
||||
image_tensor = einops.rearrange(image_tensor, "h w c -> 1 c h w")
|
||||
image_tensor = image_tensor.to(self._device)
|
||||
|
||||
# Continue with VAE encoding
|
||||
vae_info = self._context.models.load(self._vae_field.vae)
|
||||
kontext_latents_unpacked = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
|
||||
# Extract tensor dimensions
|
||||
batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
|
||||
for idx, kontext_field in enumerate(self.kontext_conditioning):
|
||||
image = self._context.images.get_pil(kontext_field.image.image_name)
|
||||
|
||||
# Pack the latents and generate IDs
|
||||
kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
|
||||
kontext_ids = generate_img_ids_with_offset(
|
||||
latent_height=latent_height,
|
||||
latent_width=latent_width,
|
||||
batch_size=batch_size,
|
||||
device=self._device,
|
||||
dtype=self._dtype,
|
||||
idx_offset=1,
|
||||
)
|
||||
# Convert to RGB
|
||||
image = image.convert("RGB")
|
||||
|
||||
return kontext_latents_packed, kontext_ids
|
||||
# Convert to tensor using torchvision transforms for consistency
|
||||
transformation = T.Compose(
|
||||
[
|
||||
T.ToTensor(), # Converts PIL image to tensor and scales to [0, 1]
|
||||
]
|
||||
)
|
||||
image_tensor = transformation(image)
|
||||
# Convert from [0, 1] to [-1, 1] range expected by VAE
|
||||
image_tensor = image_tensor * 2.0 - 1.0
|
||||
image_tensor = image_tensor.unsqueeze(0) # Add batch dimension
|
||||
image_tensor = image_tensor.to(self._device)
|
||||
|
||||
# Continue with VAE encoding
|
||||
# Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
|
||||
# Estimate working memory for encode operation (50% of decode memory requirements)
|
||||
img_h = image_tensor.shape[-2]
|
||||
img_w = image_tensor.shape[-1]
|
||||
element_size = next(vae_info.model.parameters()).element_size()
|
||||
scaling_constant = 1100 # 50% of decode scaling constant (2200)
|
||||
estimated_working_memory = int(img_h * img_w * element_size * scaling_constant)
|
||||
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, AutoEncoder)
|
||||
vae_dtype = next(iter(vae.parameters())).dtype
|
||||
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
|
||||
# Use sample=False to get the distribution mean without noise
|
||||
kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
# Extract tensor dimensions
|
||||
batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
|
||||
|
||||
# Pad latents to be compatible with patch_size=2
|
||||
# This ensures dimensions are even for the pack() function
|
||||
pad_h = (2 - latent_height % 2) % 2
|
||||
pad_w = (2 - latent_width % 2) % 2
|
||||
if pad_h > 0 or pad_w > 0:
|
||||
kontext_latents_unpacked = F.pad(kontext_latents_unpacked, (0, pad_w, 0, pad_h), mode="circular")
|
||||
# Update dimensions after padding
|
||||
_, _, latent_height, latent_width = kontext_latents_unpacked.shape
|
||||
|
||||
# Pack the latents
|
||||
kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
|
||||
|
||||
# Determine spatial offsets for this reference image
|
||||
h_offset = 0
|
||||
w_offset = 0
|
||||
|
||||
if idx > 0: # First image starts at (0, 0)
|
||||
# Calculate potential canvas dimensions for each tiling option
|
||||
# Option 1: Tile vertically (below existing content)
|
||||
potential_h_vertical = canvas_h + latent_height
|
||||
|
||||
# Option 2: Tile horizontally (to the right of existing content)
|
||||
potential_w_horizontal = canvas_w + latent_width
|
||||
|
||||
# Choose arrangement that minimizes the maximum dimension
|
||||
# This keeps the canvas closer to square, optimizing attention computation
|
||||
if potential_h_vertical > potential_w_horizontal:
|
||||
# Tile horizontally (to the right of existing images)
|
||||
w_offset = canvas_w
|
||||
canvas_w = canvas_w + latent_width
|
||||
canvas_h = max(canvas_h, latent_height)
|
||||
else:
|
||||
# Tile vertically (below existing images)
|
||||
h_offset = canvas_h
|
||||
canvas_h = canvas_h + latent_height
|
||||
canvas_w = max(canvas_w, latent_width)
|
||||
else:
|
||||
# First image - just set canvas dimensions
|
||||
canvas_h = latent_height
|
||||
canvas_w = latent_width
|
||||
|
||||
# Generate IDs with both index offset and spatial offsets
|
||||
kontext_ids = generate_img_ids_with_offset(
|
||||
latent_height=latent_height,
|
||||
latent_width=latent_width,
|
||||
batch_size=batch_size,
|
||||
device=self._device,
|
||||
dtype=self._dtype,
|
||||
idx_offset=1, # All reference images use index=1 (matching ComfyUI implementation)
|
||||
h_offset=h_offset,
|
||||
w_offset=w_offset,
|
||||
)
|
||||
|
||||
all_latents.append(kontext_latents_packed)
|
||||
all_ids.append(kontext_ids)
|
||||
|
||||
# Concatenate all latents and IDs along the sequence dimension
|
||||
concatenated_latents = torch.cat(all_latents, dim=1) # Concatenate along sequence dimension
|
||||
concatenated_ids = torch.cat(all_ids, dim=1) # Concatenate along sequence dimension
|
||||
|
||||
return concatenated_latents, concatenated_ids
|
||||
|
||||
def ensure_batch_size(self, target_batch_size: int) -> None:
|
||||
"""Ensures the kontext latents and IDs match the target batch size by repeating if necessary."""
|
||||
|
||||
@@ -9,6 +9,7 @@ import spandrel
|
||||
import torch
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
from invokeai.backend.flux.controlnet.state_dict_utils import (
|
||||
is_state_dict_instantx_controlnet,
|
||||
@@ -493,9 +494,21 @@ class ModelProbe(object):
|
||||
# scan model
|
||||
scan_result = pscan.scan_file_path(checkpoint)
|
||||
if scan_result.infected_files != 0:
|
||||
raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"The model {model_name} is potentially infected by malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(f"The model {model_name} is potentially infected by malware. Aborting import.")
|
||||
if scan_result.scan_err:
|
||||
raise Exception(f"Error scanning model {model_name} for malware. Aborting import.")
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"Error scanning the model at {model_name} for malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(f"Error scanning the model at {model_name} for malware. Aborting import.")
|
||||
|
||||
|
||||
# Probing utilities
|
||||
|
||||
@@ -6,13 +6,17 @@ import torch
|
||||
from picklescan.scanner import scan_file_path
|
||||
from safetensors import safe_open
|
||||
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
|
||||
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
|
||||
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.backend.util.silence_warnings import SilenceWarnings
|
||||
|
||||
StateDict: TypeAlias = dict[str | int, Any] # When are the keys int?
|
||||
|
||||
logger = InvokeAILogger.get_logger()
|
||||
|
||||
|
||||
class ModelOnDisk:
|
||||
"""A utility class representing a model stored on disk."""
|
||||
@@ -79,8 +83,24 @@ class ModelOnDisk:
|
||||
with SilenceWarnings():
|
||||
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
|
||||
scan_result = scan_file_path(path)
|
||||
if scan_result.infected_files != 0 or scan_result.scan_err:
|
||||
raise RuntimeError(f"The model {path.stem} is potentially infected by malware. Aborting import.")
|
||||
if scan_result.infected_files != 0:
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"The model {path.stem} is potentially infected by malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(
|
||||
f"The model {path.stem} is potentially infected by malware. Aborting import."
|
||||
)
|
||||
if scan_result.scan_err:
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"Error scanning the model at {path.stem} for malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(f"Error scanning the model at {path.stem} for malware. Aborting import.")
|
||||
checkpoint = torch.load(path, map_location="cpu")
|
||||
assert isinstance(checkpoint, dict)
|
||||
elif path.suffix.endswith(".gguf"):
|
||||
|
||||
@@ -149,13 +149,29 @@ flux_kontext = StarterModel(
|
||||
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
|
||||
)
|
||||
flux_kontext_quantized = StarterModel(
|
||||
name="FLUX.1 Kontext dev (Quantized)",
|
||||
name="FLUX.1 Kontext dev (quantized)",
|
||||
base=BaseModelType.Flux,
|
||||
source="https://huggingface.co/unsloth/FLUX.1-Kontext-dev-GGUF/resolve/main/flux1-kontext-dev-Q4_K_M.gguf",
|
||||
description="FLUX.1 Kontext dev quantized (q4_k_m). Total size with dependencies: ~14GB",
|
||||
type=ModelType.Main,
|
||||
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
|
||||
)
|
||||
flux_krea = StarterModel(
|
||||
name="FLUX.1 Krea dev",
|
||||
base=BaseModelType.Flux,
|
||||
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev/resolve/main/flux1-krea-dev.safetensors",
|
||||
description="FLUX.1 Krea dev. Total size with dependencies: ~33GB",
|
||||
type=ModelType.Main,
|
||||
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
|
||||
)
|
||||
flux_krea_quantized = StarterModel(
|
||||
name="FLUX.1 Krea dev (quantized)",
|
||||
base=BaseModelType.Flux,
|
||||
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev-GGUF/resolve/main/flux1-krea-dev-Q4_K_M.gguf",
|
||||
description="FLUX.1 Krea dev quantized (q4_k_m). Total size with dependencies: ~14GB",
|
||||
type=ModelType.Main,
|
||||
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
|
||||
)
|
||||
sd35_medium = StarterModel(
|
||||
name="SD3.5 Medium",
|
||||
base=BaseModelType.StableDiffusion3,
|
||||
@@ -580,13 +596,14 @@ t2i_sketch_sdxl = StarterModel(
|
||||
)
|
||||
# endregion
|
||||
# region SpandrelImageToImage
|
||||
realesrgan_anime = StarterModel(
|
||||
name="RealESRGAN_x4plus_anime_6B",
|
||||
animesharp_v4_rcan = StarterModel(
|
||||
name="2x-AnimeSharpV4_RCAN",
|
||||
base=BaseModelType.Any,
|
||||
source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
|
||||
description="A Real-ESRGAN 4x upscaling model (optimized for anime images).",
|
||||
source="https://github.com/Kim2091/Kim2091-Models/releases/download/2x-AnimeSharpV4/2x-AnimeSharpV4_RCAN.safetensors",
|
||||
description="A 2x upscaling model (optimized for anime images).",
|
||||
type=ModelType.SpandrelImageToImage,
|
||||
)
|
||||
|
||||
realesrgan_x4 = StarterModel(
|
||||
name="RealESRGAN_x4plus",
|
||||
base=BaseModelType.Any,
|
||||
@@ -732,7 +749,7 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
t2i_lineart_sdxl,
|
||||
t2i_sketch_sdxl,
|
||||
realesrgan_x4,
|
||||
realesrgan_anime,
|
||||
animesharp_v4_rcan,
|
||||
realesrgan_x2,
|
||||
swinir,
|
||||
t5_base_encoder,
|
||||
@@ -743,6 +760,8 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
llava_onevision,
|
||||
flux_fill,
|
||||
cogview4,
|
||||
flux_krea,
|
||||
flux_krea_quantized,
|
||||
]
|
||||
|
||||
sd1_bundle: list[StarterModel] = [
|
||||
@@ -794,6 +813,7 @@ flux_bundle: list[StarterModel] = [
|
||||
flux_redux,
|
||||
flux_fill,
|
||||
flux_kontext_quantized,
|
||||
flux_krea_quantized,
|
||||
]
|
||||
|
||||
STARTER_BUNDLES: dict[str, StarterModelBundle] = {
|
||||
|
||||
145
invokeai/backend/model_manager/util/lora_metadata_extractor.py
Normal file
145
invokeai/backend/model_manager/util/lora_metadata_extractor.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""Utility functions for extracting metadata from LoRA model files."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Set, Tuple
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.util.thumbnails import make_thumbnail
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def extract_lora_metadata(
|
||||
model_path: Path, model_key: str, model_images_path: Path
|
||||
) -> Tuple[Optional[str], Optional[Set[str]]]:
|
||||
"""
|
||||
Extract metadata for a LoRA model from associated JSON and image files.
|
||||
|
||||
Args:
|
||||
model_path: Path to the LoRA model file
|
||||
model_key: Unique key for the model
|
||||
model_images_path: Path to the model images directory
|
||||
|
||||
Returns:
|
||||
Tuple of (description, trigger_phrases)
|
||||
"""
|
||||
model_stem = model_path.stem
|
||||
model_dir = model_path.parent
|
||||
|
||||
# Find and process preview image
|
||||
_process_preview_image(model_stem, model_dir, model_key, model_images_path)
|
||||
|
||||
# Extract metadata from JSON
|
||||
description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)
|
||||
|
||||
return description, trigger_phrases
|
||||
|
||||
|
||||
def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
|
||||
"""Find and process a preview image for the model, saving it to the model images store."""
|
||||
image_extensions = [".png", ".jpg", ".jpeg", ".webp"]
|
||||
|
||||
for ext in image_extensions:
|
||||
image_path = model_dir / f"{model_stem}{ext}"
|
||||
if image_path.exists():
|
||||
try:
|
||||
# Open the image
|
||||
with Image.open(image_path) as img:
|
||||
# Create thumbnail and save to model images directory
|
||||
thumbnail = make_thumbnail(img, 256)
|
||||
thumbnail_path = model_images_path / f"{model_key}.webp"
|
||||
thumbnail.save(thumbnail_path, format="webp")
|
||||
|
||||
logger.info(f"Processed preview image {image_path.name} for model {model_key}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to process preview image {image_path.name}: {e}")
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
|
||||
"""Extract metadata from a JSON file with the same name as the model."""
|
||||
json_path = model_dir / f"{model_stem}.json"
|
||||
|
||||
if not json_path.exists():
|
||||
return None, None
|
||||
|
||||
try:
|
||||
with open(json_path, "r", encoding="utf-8") as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
# Extract description
|
||||
description = _build_description(metadata)
|
||||
|
||||
# Extract trigger phrases
|
||||
trigger_phrases = _extract_trigger_phrases(metadata)
|
||||
|
||||
if description or trigger_phrases:
|
||||
logger.info(f"Applied metadata from {json_path.name}")
|
||||
|
||||
return description, trigger_phrases
|
||||
|
||||
except (json.JSONDecodeError, IOError, Exception) as e:
|
||||
logger.warning(f"Failed to read metadata from {json_path}: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
|
||||
"""Build a description from metadata fields."""
|
||||
description_parts = []
|
||||
|
||||
if description := metadata.get("description"):
|
||||
description_parts.append(str(description).strip())
|
||||
|
||||
if notes := metadata.get("notes"):
|
||||
description_parts.append(str(notes).strip())
|
||||
|
||||
return " | ".join(description_parts) if description_parts else None
|
||||
|
||||
|
||||
def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
|
||||
"""Extract trigger phrases from metadata."""
|
||||
if not (activation_text := metadata.get("activation text")):
|
||||
return None
|
||||
|
||||
activation_text = str(activation_text).strip()
|
||||
if not activation_text:
|
||||
return None
|
||||
|
||||
# Split on commas and clean up each phrase
|
||||
phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]
|
||||
|
||||
return set(phrases) if phrases else None
|
||||
|
||||
|
||||
def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
|
||||
"""
|
||||
Apply extracted metadata to a LoRA model configuration.
|
||||
|
||||
Args:
|
||||
info: The model configuration to update
|
||||
model_path: Path to the LoRA model file
|
||||
model_images_path: Path to the model images directory
|
||||
"""
|
||||
# Only process LoRA models
|
||||
if info.type != ModelType.LoRA:
|
||||
return
|
||||
|
||||
# Extract and apply metadata
|
||||
description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)
|
||||
|
||||
# We don't set cover_image path in the config anymore since images are stored
|
||||
# separately in the model images store by model key
|
||||
|
||||
if description:
|
||||
info.description = description
|
||||
|
||||
if trigger_phrases:
|
||||
info.trigger_phrases = trigger_phrases
|
||||
@@ -8,8 +8,12 @@ import picklescan.scanner as pscan
|
||||
import safetensors
|
||||
import torch
|
||||
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.backend.model_manager.taxonomy import ClipVariantType
|
||||
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
logger = InvokeAILogger.get_logger()
|
||||
|
||||
|
||||
def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
|
||||
@@ -59,9 +63,21 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str,
|
||||
if scan:
|
||||
scan_result = pscan.scan_file_path(path)
|
||||
if scan_result.infected_files != 0:
|
||||
raise Exception(f"The model at {path} is potentially infected by malware. Aborting import.")
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"The model {path} is potentially infected by malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(f"The model {path} is potentially infected by malware. Aborting import.")
|
||||
if scan_result.scan_err:
|
||||
raise Exception(f"Error scanning model at {path} for malware. Aborting import.")
|
||||
if get_config().unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
f"Error scanning the model at {path} for malware, but picklescan is disabled. "
|
||||
"Proceeding with caution."
|
||||
)
|
||||
else:
|
||||
raise RuntimeError(f"Error scanning the model at {path} for malware. Aborting import.")
|
||||
|
||||
checkpoint = torch.load(path, map_location=torch.device("meta"))
|
||||
return checkpoint
|
||||
|
||||
@@ -20,7 +20,7 @@ def main():
|
||||
"/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
|
||||
)
|
||||
|
||||
with log_time("Intialize FLUX transformer on meta device"):
|
||||
with log_time("Initialize FLUX transformer on meta device"):
|
||||
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
|
||||
p = params["flux-schnell"]
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ def main():
|
||||
)
|
||||
|
||||
# inference_dtype = torch.bfloat16
|
||||
with log_time("Intialize FLUX transformer on meta device"):
|
||||
with log_time("Initialize FLUX transformer on meta device"):
|
||||
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
|
||||
p = params["flux-schnell"]
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ def main():
|
||||
"""
|
||||
model_path = Path("/data/misc/text_encoder_2")
|
||||
|
||||
with log_time("Intialize T5 on meta device"):
|
||||
with log_time("Initialize T5 on meta device"):
|
||||
model_config = AutoConfig.from_pretrained(model_path)
|
||||
with accelerate.init_empty_weights():
|
||||
model = AutoModelForTextEncoding.from_config(model_config)
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
dist/
|
||||
static/
|
||||
.husky/
|
||||
node_modules/
|
||||
patches/
|
||||
stats.html
|
||||
index.html
|
||||
.yarn/
|
||||
*.scss
|
||||
src/services/api/schema.ts
|
||||
@@ -1,88 +0,0 @@
|
||||
module.exports = {
|
||||
extends: ['@invoke-ai/eslint-config-react'],
|
||||
plugins: ['path', 'i18next'],
|
||||
rules: {
|
||||
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
|
||||
'react-refresh/only-export-components': 'off',
|
||||
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
|
||||
'@typescript-eslint/consistent-type-assertions': 'off',
|
||||
// https://github.com/qdanik/eslint-plugin-path
|
||||
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
||||
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
|
||||
// TODO: ENABLE THIS RULE BEFORE v6.0.0
|
||||
// 'i18next/no-literal-string': 'error',
|
||||
// https://eslint.org/docs/latest/rules/no-console
|
||||
'no-console': 'warn',
|
||||
// https://eslint.org/docs/latest/rules/no-promise-executor-return
|
||||
'no-promise-executor-return': 'error',
|
||||
// https://eslint.org/docs/latest/rules/require-await
|
||||
'require-await': 'error',
|
||||
// Restrict setActiveTab calls to only use-navigation-api.tsx
|
||||
'no-restricted-syntax': [
|
||||
'error',
|
||||
{
|
||||
selector: 'CallExpression[callee.name="setActiveTab"]',
|
||||
message:
|
||||
'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
|
||||
},
|
||||
],
|
||||
// TODO: ENABLE THIS RULE BEFORE v6.0.0
|
||||
'react/display-name': 'off',
|
||||
'no-restricted-properties': [
|
||||
'error',
|
||||
{
|
||||
object: 'crypto',
|
||||
property: 'randomUUID',
|
||||
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
|
||||
},
|
||||
{
|
||||
object: 'navigator',
|
||||
property: 'clipboard',
|
||||
message:
|
||||
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
|
||||
},
|
||||
],
|
||||
'no-restricted-imports': [
|
||||
'error',
|
||||
{
|
||||
paths: [
|
||||
{
|
||||
name: 'lodash-es',
|
||||
importNames: ['isEqual'],
|
||||
message: 'Please use objectEquals from @observ33r/object-equals instead.',
|
||||
},
|
||||
{
|
||||
name: 'lodash-es',
|
||||
message: 'Please use es-toolkit instead.',
|
||||
},
|
||||
{
|
||||
name: 'es-toolkit',
|
||||
importNames: ['isEqual'],
|
||||
message: 'Please use objectEquals from @observ33r/object-equals instead.',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
overrides: [
|
||||
/**
|
||||
* Allow setActiveTab calls only in use-navigation-api.tsx
|
||||
*/
|
||||
{
|
||||
files: ['**/use-navigation-api.tsx'],
|
||||
rules: {
|
||||
'no-restricted-syntax': 'off',
|
||||
},
|
||||
},
|
||||
/**
|
||||
* Overrides for stories
|
||||
*/
|
||||
{
|
||||
files: ['*.stories.tsx'],
|
||||
rules: {
|
||||
// We may not have i18n available in stories.
|
||||
'i18next/no-literal-string': 'off',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
3
invokeai/frontend/web/.gitignore
vendored
3
invokeai/frontend/web/.gitignore
vendored
@@ -44,4 +44,5 @@ yalc.lock
|
||||
|
||||
# vitest
|
||||
tsconfig.vitest-temp.json
|
||||
coverage/
|
||||
coverage/
|
||||
*.tgz
|
||||
|
||||
@@ -14,3 +14,4 @@ static/
|
||||
src/theme/css/overlayscrollbars.css
|
||||
src/theme_/css/overlayscrollbars.css
|
||||
pnpm-lock.yaml
|
||||
.claude
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
module.exports = {
|
||||
...require('@invoke-ai/prettier-config-react'),
|
||||
overrides: [
|
||||
{
|
||||
files: ['public/locales/*.json'],
|
||||
options: {
|
||||
tabWidth: 4,
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
17
invokeai/frontend/web/.prettierrc.json
Normal file
17
invokeai/frontend/web/.prettierrc.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"$schema": "http://json.schemastore.org/prettierrc",
|
||||
"trailingComma": "es5",
|
||||
"printWidth": 120,
|
||||
"tabWidth": 2,
|
||||
"semi": true,
|
||||
"singleQuote": true,
|
||||
"endOfLine": "auto",
|
||||
"overrides": [
|
||||
{
|
||||
"files": ["public/locales/*.json"],
|
||||
"options": {
|
||||
"tabWidth": 4
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,21 +1,23 @@
|
||||
import { PropsWithChildren, memo, useEffect } from 'react';
|
||||
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
|
||||
import { useAppDispatch } from '../src/app/store/storeHooks';
|
||||
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
|
||||
import type { PropsWithChildren } from 'react';
|
||||
import { memo, useEffect } from 'react';
|
||||
|
||||
import { useAppDispatch } from '../src/app/store/storeHooks';
|
||||
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
|
||||
/**
|
||||
* Initializes some state for storybook. Must be in a different component
|
||||
* so that it is run inside the redux context.
|
||||
*/
|
||||
export const ReduxInit = memo((props: PropsWithChildren) => {
|
||||
export const ReduxInit = memo(({ children }: PropsWithChildren) => {
|
||||
const dispatch = useAppDispatch();
|
||||
useGlobalModifiersInit();
|
||||
useEffect(() => {
|
||||
dispatch(
|
||||
modelChanged({ model: { key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' } })
|
||||
);
|
||||
}, []);
|
||||
}, [dispatch]);
|
||||
|
||||
return props.children;
|
||||
return children;
|
||||
});
|
||||
|
||||
ReduxInit.displayName = 'ReduxInit';
|
||||
|
||||
@@ -2,19 +2,13 @@ import type { StorybookConfig } from '@storybook/react-vite';
|
||||
|
||||
const config: StorybookConfig = {
|
||||
stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
|
||||
addons: [
|
||||
'@storybook/addon-links',
|
||||
'@storybook/addon-essentials',
|
||||
'@storybook/addon-interactions',
|
||||
'@storybook/addon-storysource',
|
||||
],
|
||||
addons: ['@storybook/addon-links', '@storybook/addon-docs'],
|
||||
|
||||
framework: {
|
||||
name: '@storybook/react-vite',
|
||||
options: {},
|
||||
},
|
||||
docs: {
|
||||
autodocs: 'tag',
|
||||
},
|
||||
|
||||
core: {
|
||||
disableTelemetry: true,
|
||||
},
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { addons } from '@storybook/manager-api';
|
||||
import { themes } from '@storybook/theming';
|
||||
import { addons } from 'storybook/manager-api';
|
||||
import { themes } from 'storybook/theming';
|
||||
|
||||
addons.setConfig({
|
||||
theme: themes.dark,
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
import { Preview } from '@storybook/react';
|
||||
import { themes } from '@storybook/theming';
|
||||
import type { Preview } from '@storybook/react-vite';
|
||||
import { themes } from 'storybook/theming';
|
||||
import { $store } from 'app/store/nanostores/store';
|
||||
import i18n from 'i18next';
|
||||
import { initReactI18next } from 'react-i18next';
|
||||
import { Provider } from 'react-redux';
|
||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||
import { createStore } from '../src/app/store/store';
|
||||
|
||||
// TODO: Disabled for IDE performance issues with our translation JSON
|
||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||
// @ts-ignore
|
||||
import translationEN from '../public/locales/en.json';
|
||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||
import { createStore } from '../src/app/store/store';
|
||||
import { ReduxInit } from './ReduxInit';
|
||||
import { $store } from 'app/store/nanostores/store';
|
||||
|
||||
i18n.use(initReactI18next).init({
|
||||
lng: 'en',
|
||||
@@ -25,7 +26,7 @@ i18n.use(initReactI18next).init({
|
||||
returnNull: false,
|
||||
});
|
||||
|
||||
const store = createStore(undefined, false);
|
||||
const store = createStore();
|
||||
$store.set(store);
|
||||
$baseUrl.set('http://localhost:9090');
|
||||
|
||||
@@ -46,6 +47,7 @@ const preview: Preview = {
|
||||
parameters: {
|
||||
docs: {
|
||||
theme: themes.dark,
|
||||
codePanel: true,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
246
invokeai/frontend/web/eslint.config.mjs
Normal file
246
invokeai/frontend/web/eslint.config.mjs
Normal file
@@ -0,0 +1,246 @@
|
||||
import js from '@eslint/js';
|
||||
import typescriptEslint from '@typescript-eslint/eslint-plugin';
|
||||
import typescriptParser from '@typescript-eslint/parser';
|
||||
import pluginI18Next from 'eslint-plugin-i18next';
|
||||
import pluginImport from 'eslint-plugin-import';
|
||||
import pluginPath from 'eslint-plugin-path';
|
||||
import pluginReact from 'eslint-plugin-react';
|
||||
import pluginReactHooks from 'eslint-plugin-react-hooks';
|
||||
import pluginReactRefresh from 'eslint-plugin-react-refresh';
|
||||
import pluginSimpleImportSort from 'eslint-plugin-simple-import-sort';
|
||||
import pluginStorybook from 'eslint-plugin-storybook';
|
||||
import pluginUnusedImports from 'eslint-plugin-unused-imports';
|
||||
import globals from 'globals';
|
||||
|
||||
export default [
|
||||
js.configs.recommended,
|
||||
|
||||
{
|
||||
languageOptions: {
|
||||
parser: typescriptParser,
|
||||
parserOptions: {
|
||||
ecmaFeatures: {
|
||||
jsx: true,
|
||||
},
|
||||
},
|
||||
globals: {
|
||||
...globals.browser,
|
||||
...globals.node,
|
||||
GlobalCompositeOperation: 'readonly',
|
||||
RequestInit: 'readonly',
|
||||
},
|
||||
},
|
||||
|
||||
files: ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx'],
|
||||
|
||||
plugins: {
|
||||
react: pluginReact,
|
||||
'@typescript-eslint': typescriptEslint,
|
||||
'react-hooks': pluginReactHooks,
|
||||
import: pluginImport,
|
||||
'unused-imports': pluginUnusedImports,
|
||||
'simple-import-sort': pluginSimpleImportSort,
|
||||
'react-refresh': pluginReactRefresh.configs.vite,
|
||||
path: pluginPath,
|
||||
i18next: pluginI18Next,
|
||||
storybook: pluginStorybook,
|
||||
},
|
||||
|
||||
rules: {
|
||||
...typescriptEslint.configs.recommended.rules,
|
||||
...pluginReact.configs.recommended.rules,
|
||||
...pluginReact.configs['jsx-runtime'].rules,
|
||||
...pluginReactHooks.configs.recommended.rules,
|
||||
...pluginStorybook.configs.recommended.rules,
|
||||
|
||||
'react/jsx-no-bind': [
|
||||
'error',
|
||||
{
|
||||
allowBind: true,
|
||||
},
|
||||
],
|
||||
|
||||
'react/jsx-curly-brace-presence': [
|
||||
'error',
|
||||
{
|
||||
props: 'never',
|
||||
children: 'never',
|
||||
},
|
||||
],
|
||||
|
||||
'react-hooks/exhaustive-deps': 'error',
|
||||
|
||||
curly: 'error',
|
||||
'no-var': 'error',
|
||||
'brace-style': 'error',
|
||||
'prefer-template': 'error',
|
||||
radix: 'error',
|
||||
'space-before-blocks': 'error',
|
||||
eqeqeq: 'error',
|
||||
'one-var': ['error', 'never'],
|
||||
'no-eval': 'error',
|
||||
'no-extend-native': 'error',
|
||||
'no-implied-eval': 'error',
|
||||
'no-label-var': 'error',
|
||||
'no-return-assign': 'error',
|
||||
'no-sequences': 'error',
|
||||
'no-template-curly-in-string': 'error',
|
||||
'no-throw-literal': 'error',
|
||||
'no-unmodified-loop-condition': 'error',
|
||||
'import/no-duplicates': 'error',
|
||||
'import/prefer-default-export': 'off',
|
||||
'unused-imports/no-unused-imports': 'error',
|
||||
|
||||
'unused-imports/no-unused-vars': [
|
||||
'error',
|
||||
{
|
||||
vars: 'all',
|
||||
varsIgnorePattern: '^_',
|
||||
args: 'after-used',
|
||||
argsIgnorePattern: '^_',
|
||||
},
|
||||
],
|
||||
|
||||
'simple-import-sort/imports': 'error',
|
||||
'simple-import-sort/exports': 'error',
|
||||
'@typescript-eslint/no-unused-vars': 'off',
|
||||
|
||||
'@typescript-eslint/ban-ts-comment': [
|
||||
'error',
|
||||
{
|
||||
'ts-expect-error': 'allow-with-description',
|
||||
'ts-ignore': true,
|
||||
'ts-nocheck': true,
|
||||
'ts-check': false,
|
||||
minimumDescriptionLength: 10,
|
||||
},
|
||||
],
|
||||
|
||||
'@typescript-eslint/no-empty-interface': [
|
||||
'error',
|
||||
{
|
||||
allowSingleExtends: true,
|
||||
},
|
||||
],
|
||||
|
||||
'@typescript-eslint/consistent-type-imports': [
|
||||
'error',
|
||||
{
|
||||
prefer: 'type-imports',
|
||||
fixStyle: 'separate-type-imports',
|
||||
disallowTypeAnnotations: true,
|
||||
},
|
||||
],
|
||||
|
||||
'@typescript-eslint/no-import-type-side-effects': 'error',
|
||||
|
||||
'@typescript-eslint/consistent-type-assertions': [
|
||||
'error',
|
||||
{
|
||||
assertionStyle: 'as',
|
||||
},
|
||||
],
|
||||
|
||||
'path/no-relative-imports': [
|
||||
'error',
|
||||
{
|
||||
maxDepth: 0,
|
||||
},
|
||||
],
|
||||
|
||||
'no-console': 'warn',
|
||||
'no-promise-executor-return': 'error',
|
||||
'require-await': 'error',
|
||||
|
||||
'no-restricted-syntax': [
|
||||
'error',
|
||||
{
|
||||
selector: 'CallExpression[callee.name="setActiveTab"]',
|
||||
message:
|
||||
'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
|
||||
},
|
||||
],
|
||||
|
||||
'no-restricted-properties': [
|
||||
'error',
|
||||
{
|
||||
object: 'crypto',
|
||||
property: 'randomUUID',
|
||||
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
|
||||
},
|
||||
{
|
||||
object: 'navigator',
|
||||
property: 'clipboard',
|
||||
message:
|
||||
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
|
||||
},
|
||||
],
|
||||
|
||||
// Typescript handles this for us: https://eslint.org/docs/latest/rules/no-redeclare#handled_by_typescript
|
||||
'no-redeclare': 'off',
|
||||
|
||||
'no-restricted-imports': [
|
||||
'error',
|
||||
{
|
||||
paths: [
|
||||
{
|
||||
name: 'lodash-es',
|
||||
importNames: ['isEqual'],
|
||||
message: 'Please use objectEquals from @observ33r/object-equals instead.',
|
||||
},
|
||||
{
|
||||
name: 'lodash-es',
|
||||
message: 'Please use es-toolkit instead.',
|
||||
},
|
||||
{
|
||||
name: 'es-toolkit',
|
||||
importNames: ['isEqual'],
|
||||
message: 'Please use objectEquals from @observ33r/object-equals instead.',
|
||||
},
|
||||
{
|
||||
name: 'zod/v3',
|
||||
message: 'Import from zod instead.',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
settings: {
|
||||
react: {
|
||||
version: 'detect',
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
files: ['**/use-navigation-api.tsx'],
|
||||
rules: {
|
||||
'no-restricted-syntax': 'off',
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
files: ['**/*.stories.tsx'],
|
||||
rules: {
|
||||
'i18next/no-literal-string': 'off',
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ignores: [
|
||||
'**/dist/',
|
||||
'**/static/',
|
||||
'**/.husky/',
|
||||
'**/node_modules/',
|
||||
'**/patches/',
|
||||
'**/stats.html',
|
||||
'**/index.html',
|
||||
'**/.yarn/',
|
||||
'**/*.scss',
|
||||
'src/services/api/schema.ts',
|
||||
'.prettierrc.js',
|
||||
'.storybook',
|
||||
],
|
||||
},
|
||||
];
|
||||
@@ -14,8 +14,10 @@ const config: KnipConfig = {
|
||||
'src/features/controlLayers/konva/util.ts',
|
||||
// Will be using this
|
||||
'src/common/hooks/useAsyncState.ts',
|
||||
'src/app/store/use-debounced-app-selector.ts',
|
||||
],
|
||||
ignoreBinaries: ['only-allow'],
|
||||
ignoreDependencies: ['magic-string'],
|
||||
paths: {
|
||||
'public/*': ['public/*'],
|
||||
},
|
||||
|
||||
@@ -47,25 +47,25 @@
|
||||
"@fontsource-variable/inter": "^5.2.6",
|
||||
"@invoke-ai/ui-library": "^0.0.46",
|
||||
"@nanostores/react": "^1.0.0",
|
||||
"@observ33r/object-equals": "^1.1.4",
|
||||
"@observ33r/object-equals": "^1.1.5",
|
||||
"@reduxjs/toolkit": "2.8.2",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"@xyflow/react": "^12.7.1",
|
||||
"ag-psd": "^28.2.1",
|
||||
"@xyflow/react": "^12.8.2",
|
||||
"ag-psd": "^28.2.2",
|
||||
"async-mutex": "^0.5.0",
|
||||
"chakra-react-select": "^4.9.2",
|
||||
"cmdk": "^1.1.1",
|
||||
"compare-versions": "^6.1.1",
|
||||
"dockview": "^4.4.0",
|
||||
"es-toolkit": "^1.39.5",
|
||||
"dockview": "^4.4.1",
|
||||
"es-toolkit": "^1.39.7",
|
||||
"filesize": "^10.1.6",
|
||||
"fracturedjsonjs": "^4.1.0",
|
||||
"framer-motion": "^11.10.0",
|
||||
"i18next": "^25.2.1",
|
||||
"i18next": "^25.3.2",
|
||||
"i18next-http-backend": "^3.0.2",
|
||||
"idb-keyval": "6.2.1",
|
||||
"jsondiffpatch": "^0.7.3",
|
||||
"konva": "^9.3.20",
|
||||
"konva": "^9.3.22",
|
||||
"linkify-react": "^4.3.1",
|
||||
"linkifyjs": "^4.3.1",
|
||||
"lru-cache": "^11.1.0",
|
||||
@@ -83,7 +83,7 @@
|
||||
"react-dom": "^18.3.1",
|
||||
"react-dropzone": "^14.3.8",
|
||||
"react-error-boundary": "^5.0.0",
|
||||
"react-hook-form": "^7.58.1",
|
||||
"react-hook-form": "^7.60.0",
|
||||
"react-hotkeys-hook": "4.5.0",
|
||||
"react-i18next": "^15.5.3",
|
||||
"react-icons": "^5.5.0",
|
||||
@@ -103,7 +103,7 @@
|
||||
"use-debounce": "^10.0.5",
|
||||
"use-device-pixel-ratio": "^1.1.2",
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^3.25.67",
|
||||
"zod": "^4.0.10",
|
||||
"zod-validation-error": "^3.5.2"
|
||||
},
|
||||
"peerDependencies": {
|
||||
@@ -111,39 +111,44 @@
|
||||
"react-dom": "^18.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
"@invoke-ai/prettier-config-react": "^0.0.7",
|
||||
"@storybook/addon-essentials": "^8.6.12",
|
||||
"@storybook/addon-interactions": "^8.6.12",
|
||||
"@storybook/addon-links": "^8.6.12",
|
||||
"@storybook/addon-storysource": "^8.6.12",
|
||||
"@storybook/manager-api": "^8.6.12",
|
||||
"@storybook/react": "^8.6.12",
|
||||
"@storybook/react-vite": "^8.6.12",
|
||||
"@storybook/theming": "^8.6.12",
|
||||
"@eslint/js": "^9.31.0",
|
||||
"@storybook/addon-docs": "^9.0.17",
|
||||
"@storybook/addon-links": "^9.0.17",
|
||||
"@storybook/react-vite": "^9.0.17",
|
||||
"@types/node": "^22.15.1",
|
||||
"@types/react": "^18.3.11",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^8.37.0",
|
||||
"@typescript-eslint/parser": "^8.37.0",
|
||||
"@vitejs/plugin-react-swc": "^3.9.0",
|
||||
"@vitest/coverage-v8": "^3.1.2",
|
||||
"@vitest/ui": "^3.1.2",
|
||||
"concurrently": "^9.1.2",
|
||||
"csstype": "^3.1.3",
|
||||
"dpdm": "^3.14.0",
|
||||
"eslint": "^8.57.1",
|
||||
"eslint-plugin-i18next": "^6.1.1",
|
||||
"eslint-plugin-path": "^1.3.0",
|
||||
"eslint": "^9.31.0",
|
||||
"eslint-plugin-i18next": "^6.1.2",
|
||||
"eslint-plugin-import": "^2.29.1",
|
||||
"eslint-plugin-path": "^2.0.3",
|
||||
"eslint-plugin-react": "^7.33.2",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.5",
|
||||
"eslint-plugin-simple-import-sort": "^12.0.0",
|
||||
"eslint-plugin-storybook": "^9.0.17",
|
||||
"eslint-plugin-unused-imports": "^4.1.4",
|
||||
"globals": "^16.3.0",
|
||||
"knip": "^5.61.3",
|
||||
"magic-string": "^0.30.17",
|
||||
"openapi-types": "^12.1.3",
|
||||
"openapi-typescript": "^7.6.1",
|
||||
"prettier": "^3.5.3",
|
||||
"rollup-plugin-visualizer": "^5.14.0",
|
||||
"storybook": "^8.6.12",
|
||||
"rollup-plugin-visualizer": "^6.0.3",
|
||||
"storybook": "^9.0.17",
|
||||
"tsafe": "^1.8.5",
|
||||
"type-fest": "^4.40.0",
|
||||
"typescript": "^5.8.3",
|
||||
"vite": "^7.0.2",
|
||||
"vite": "^7.0.5",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.2",
|
||||
"vite-plugin-dts": "^4.5.3",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
|
||||
2183
invokeai/frontend/web/pnpm-lock.yaml
generated
2183
invokeai/frontend/web/pnpm-lock.yaml
generated
File diff suppressed because it is too large
Load Diff
@@ -711,7 +711,8 @@
|
||||
"gaussianBlur": "Gaußsche Unschärfe",
|
||||
"sendToUpscale": "An Hochskalieren senden",
|
||||
"useCpuNoise": "CPU-Rauschen verwenden",
|
||||
"sendToCanvas": "An Leinwand senden"
|
||||
"sendToCanvas": "An Leinwand senden",
|
||||
"disabledNoRasterContent": "Deaktiviert (kein Rasterinhalt)"
|
||||
},
|
||||
"settings": {
|
||||
"displayInProgress": "Zwischenbilder anzeigen",
|
||||
@@ -789,7 +790,10 @@
|
||||
"pasteSuccess": "Eingefügt in {{destination}}",
|
||||
"pasteFailed": "Einfügen fehlgeschlagen",
|
||||
"unableToCopy": "Kopieren nicht möglich",
|
||||
"unableToCopyDesc_theseSteps": "diese Schritte"
|
||||
"unableToCopyDesc_theseSteps": "diese Schritte",
|
||||
"noRasterLayers": "Keine Rasterebenen gefunden",
|
||||
"noActiveRasterLayers": "Keine aktiven Rasterebenen",
|
||||
"noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Bild hochladen",
|
||||
@@ -847,7 +851,10 @@
|
||||
"assetsWithCount_one": "{{count}} in der Sammlung",
|
||||
"assetsWithCount_other": "{{count}} in der Sammlung",
|
||||
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
|
||||
"updateBoardError": "Fehler beim Aktualisieren des Ordners"
|
||||
"updateBoardError": "Fehler beim Aktualisieren des Ordners",
|
||||
"uncategorizedImages": "Nicht kategorisierte Bilder",
|
||||
"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
|
||||
"deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -1194,6 +1201,9 @@
|
||||
"Die Kantengröße des Kohärenzdurchlaufs."
|
||||
],
|
||||
"heading": "Kantengröße"
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "Rasterebene"
|
||||
}
|
||||
},
|
||||
"invocationCache": {
|
||||
@@ -1431,7 +1441,10 @@
|
||||
"autoLayout": "Auto Layout",
|
||||
"copyShareLink": "Teilen-Link kopieren",
|
||||
"download": "Herunterladen",
|
||||
"convertGraph": "Graph konvertieren"
|
||||
"convertGraph": "Graph konvertieren",
|
||||
"filterByTags": "Nach Tags filtern",
|
||||
"yourWorkflows": "Ihre Arbeitsabläufe",
|
||||
"recentlyOpened": "Kürzlich geöffnet"
|
||||
},
|
||||
"sdxl": {
|
||||
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
|
||||
@@ -1444,12 +1457,19 @@
|
||||
"prompt": {
|
||||
"noMatchingTriggers": "Keine passenden Trigger",
|
||||
"addPromptTrigger": "Prompt-Trigger hinzufügen",
|
||||
"compatibleEmbeddings": "Kompatible Einbettungen"
|
||||
"compatibleEmbeddings": "Kompatible Einbettungen",
|
||||
"replace": "Ersetzen",
|
||||
"insert": "Einfügen",
|
||||
"discard": "Verwerfen",
|
||||
"generateFromImage": "Prompt aus Bild generieren",
|
||||
"expandCurrentPrompt": "Aktuelle Prompt erweitern",
|
||||
"uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
|
||||
"expandingPrompt": "Prompt wird erweitert...",
|
||||
"resultTitle": "Prompt-Erweiterung abgeschlossen"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"queue": "Warteschlange",
|
||||
"generation": "Erzeugung",
|
||||
"gallery": "Galerie",
|
||||
"models": "Modelle",
|
||||
"upscaling": "Hochskalierung",
|
||||
@@ -1573,30 +1593,30 @@
|
||||
"newGlobalReferenceImage": "Neues globales Referenzbild",
|
||||
"newRegionalReferenceImage": "Neues regionales Referenzbild",
|
||||
"newControlLayer": "Neue Kontroll-Ebene",
|
||||
"newRasterLayer": "Neue Raster-Ebene"
|
||||
"newRasterLayer": "Neue Rasterebene"
|
||||
},
|
||||
"rectangle": "Rechteck",
|
||||
"saveCanvasToGallery": "Leinwand in Galerie speichern",
|
||||
"newRasterLayerError": "Problem beim Erstellen einer Raster-Ebene",
|
||||
"newRasterLayerError": "Problem beim Erstellen einer Rasterebene",
|
||||
"saveLayerToAssets": "Ebene in Galerie speichern",
|
||||
"deleteReferenceImage": "Referenzbild löschen",
|
||||
"referenceImage": "Referenzbild",
|
||||
"opacity": "Opazität",
|
||||
"removeBookmark": "Lesezeichen entfernen",
|
||||
"rasterLayer": "Raster-Ebene",
|
||||
"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
|
||||
"rasterLayer": "Rasterebene",
|
||||
"rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
|
||||
"controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
|
||||
"deleteSelected": "Ausgewählte löschen",
|
||||
"newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
|
||||
"newControlLayerOk": "Kontroll-Ebene erstellt",
|
||||
"newControlLayerError": "Problem beim Erstellen einer Kontroll-Ebene",
|
||||
"newRasterLayerOk": "Raster-Layer erstellt",
|
||||
"newRasterLayerOk": "Rasterebene erstellt",
|
||||
"moveToFront": "Nach vorne bringen",
|
||||
"copyToClipboard": "In die Zwischenablage kopieren",
|
||||
"controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
|
||||
"clearCaches": "Cache leeren",
|
||||
"controlLayer": "Kontroll-Ebene",
|
||||
"rasterLayers_withCount_hidden": "Raster-Ebenen ({{count}} ausgeblendet)",
|
||||
"rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
|
||||
"transparency": "Transparenz",
|
||||
"canvas": "Leinwand",
|
||||
"global": "Global",
|
||||
@@ -1682,7 +1702,14 @@
|
||||
"filterType": "Filtertyp",
|
||||
"filter": "Filter"
|
||||
},
|
||||
"bookmark": "Lesezeichen für Schnell-Umschalten"
|
||||
"bookmark": "Lesezeichen für Schnell-Umschalten",
|
||||
"asRasterLayer": "Als $t(controlLayers.rasterLayer)",
|
||||
"asRasterLayerResize": "Als $t(controlLayers.rasterLayer) (Größe anpassen)",
|
||||
"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
|
||||
"rasterLayer_withCount_other": "Rasterebenen",
|
||||
"newRasterLayer": "Neue $t(controlLayers.rasterLayer)",
|
||||
"showNonRasterLayers": "Nicht-Rasterebenen anzeigen (Umschalt+H)",
|
||||
"hideNonRasterLayers": "Nicht-Rasterebenen ausblenden (Umschalt+H)"
|
||||
},
|
||||
"upsell": {
|
||||
"shareAccess": "Zugang teilen",
|
||||
|
||||
@@ -114,6 +114,9 @@
|
||||
"t2iAdapter": "T2I Adapter",
|
||||
"positivePrompt": "Positive Prompt",
|
||||
"negativePrompt": "Negative Prompt",
|
||||
"removeNegativePrompt": "Remove Negative Prompt",
|
||||
"addNegativePrompt": "Add Negative Prompt",
|
||||
"selectYourModel": "Select Your Model",
|
||||
"discordLabel": "Discord",
|
||||
"dontAskMeAgain": "Don't ask me again",
|
||||
"dontShowMeThese": "Don't show me these",
|
||||
@@ -253,6 +256,7 @@
|
||||
"cancel": "Cancel",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
|
||||
"cancelAllExceptCurrent": "Cancel All Except Current",
|
||||
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
|
||||
"cancelTooltip": "Cancel Current Item",
|
||||
"cancelSucceeded": "Item Canceled",
|
||||
@@ -273,7 +277,7 @@
|
||||
"retryItem": "Retry Item",
|
||||
"cancelBatchSucceeded": "Batch Canceled",
|
||||
"cancelBatchFailed": "Problem Canceling Batch",
|
||||
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
|
||||
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled and the Canvas Staging Area will be reset.",
|
||||
"clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
|
||||
"current": "Current",
|
||||
"next": "Next",
|
||||
@@ -470,6 +474,11 @@
|
||||
"togglePanels": {
|
||||
"title": "Toggle Panels",
|
||||
"desc": "Show or hide both left and right panels at once."
|
||||
},
|
||||
"selectGenerateTab": {
|
||||
"title": "Select the Generate Tab",
|
||||
"desc": "Selects the Generate tab.",
|
||||
"key": "1"
|
||||
}
|
||||
},
|
||||
"canvas": {
|
||||
@@ -574,6 +583,10 @@
|
||||
"title": "Transform",
|
||||
"desc": "Transform the selected layer."
|
||||
},
|
||||
"invertMask": {
|
||||
"title": "Invert Mask",
|
||||
"desc": "Invert the selected inpaint mask, creating a new mask with opposite transparency."
|
||||
},
|
||||
"applyFilter": {
|
||||
"title": "Apply Filter",
|
||||
"desc": "Apply the pending filter to the selected layer."
|
||||
@@ -599,6 +612,24 @@
|
||||
"toggleNonRasterLayers": {
|
||||
"title": "Toggle Non-Raster Layers",
|
||||
"desc": "Show or hide all non-raster layer categories (Control Layers, Inpaint Masks, Regional Guidance)."
|
||||
},
|
||||
"fitBboxToLayers": {
|
||||
"title": "Fit Bbox To Layers",
|
||||
"desc": "Automatically adjust the generation bounding box to fit visible layers"
|
||||
},
|
||||
"fitBboxToMasks": {
|
||||
"title": "Fit Bbox To Masks",
|
||||
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
|
||||
},
|
||||
"applySegmentAnything": {
|
||||
"title": "Apply Segment Anything",
|
||||
"desc": "Apply the current Segment Anything mask.",
|
||||
"key": "enter"
|
||||
},
|
||||
"cancelSegmentAnything": {
|
||||
"title": "Cancel Segment Anything",
|
||||
"desc": "Cancel the current Segment Anything operation.",
|
||||
"key": "esc"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -728,6 +759,10 @@
|
||||
"deleteSelection": {
|
||||
"title": "Delete",
|
||||
"desc": "Delete all selected images. By default, you will be prompted to confirm deletion. If the images are currently in use in the app, you will be warned."
|
||||
},
|
||||
"starImage": {
|
||||
"title": "Star/Unstar Image",
|
||||
"desc": "Star or unstar the selected image."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -735,6 +770,7 @@
|
||||
"allPrompts": "All Prompts",
|
||||
"cfgScale": "CFG scale",
|
||||
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
|
||||
"clipSkip": "$t(parameters.clipSkip)",
|
||||
"createdBy": "Created By",
|
||||
"generationMode": "Generation Mode",
|
||||
"guidance": "Guidance",
|
||||
@@ -1125,7 +1161,23 @@
|
||||
"addItem": "Add Item",
|
||||
"generateValues": "Generate Values",
|
||||
"floatRangeGenerator": "Float Range Generator",
|
||||
"integerRangeGenerator": "Integer Range Generator"
|
||||
"integerRangeGenerator": "Integer Range Generator",
|
||||
"layout": {
|
||||
"autoLayout": "Auto Layout",
|
||||
"layeringStrategy": "Layering Strategy",
|
||||
"networkSimplex": "Network Simplex",
|
||||
"longestPath": "Longest Path",
|
||||
"nodeSpacing": "Node Spacing",
|
||||
"layerSpacing": "Layer Spacing",
|
||||
"layoutDirection": "Layout Direction",
|
||||
"layoutDirectionRight": "Right",
|
||||
"layoutDirectionDown": "Down",
|
||||
"alignment": "Node Alignment",
|
||||
"alignmentUL": "Top Left",
|
||||
"alignmentDL": "Bottom Left",
|
||||
"alignmentUR": "Top Right",
|
||||
"alignmentDR": "Bottom Right"
|
||||
}
|
||||
},
|
||||
"parameters": {
|
||||
"aspect": "Aspect",
|
||||
@@ -1191,7 +1243,7 @@
|
||||
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
|
||||
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
|
||||
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
|
||||
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with FLUX Kontext via BFL API",
|
||||
"canvasIsFiltering": "Canvas is busy (filtering)",
|
||||
"canvasIsTransforming": "Canvas is busy (transforming)",
|
||||
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
|
||||
@@ -1239,6 +1291,7 @@
|
||||
"remixImage": "Remix Image",
|
||||
"usePrompt": "Use Prompt",
|
||||
"useSeed": "Use Seed",
|
||||
"useClipSkip": "Use CLIP Skip",
|
||||
"width": "Width",
|
||||
"gaussianBlur": "Gaussian Blur",
|
||||
"boxBlur": "Box Blur",
|
||||
@@ -1407,7 +1460,15 @@
|
||||
"sentToUpscale": "Sent to Upscale",
|
||||
"promptGenerationStarted": "Prompt generation started",
|
||||
"uploadAndPromptGenerationFailed": "Failed to upload image and generate prompt",
|
||||
"promptExpansionFailed": "We ran into an issue. Please try prompt expansion again."
|
||||
"promptExpansionFailed": "We ran into an issue. Please try prompt expansion again.",
|
||||
"maskInverted": "Mask Inverted",
|
||||
"maskInvertFailed": "Failed to Invert Mask",
|
||||
"noVisibleMasks": "No Visible Masks",
|
||||
"noVisibleMasksDesc": "Create or enable at least one inpaint mask to invert",
|
||||
"noInpaintMaskSelected": "No Inpaint Mask Selected",
|
||||
"noInpaintMaskSelectedDesc": "Select an inpaint mask to invert",
|
||||
"invalidBbox": "Invalid Bounding Box",
|
||||
"invalidBboxDesc": "The bounding box has no valid dimensions"
|
||||
},
|
||||
"popovers": {
|
||||
"clipSkip": {
|
||||
@@ -1775,6 +1836,20 @@
|
||||
"Structure controls how closely the output image will keep to the layout of the original. Low structure allows major changes, while high structure strictly maintains the original composition and layout."
|
||||
]
|
||||
},
|
||||
"tileSize": {
|
||||
"heading": "Tile Size",
|
||||
"paragraphs": [
|
||||
"Controls the size of tiles used during the upscaling process. Larger tiles use more memory but may produce better results.",
|
||||
"SD1.5 models default to 768, while SDXL models default to 1024. Reduce tile size if you encounter memory issues."
|
||||
]
|
||||
},
|
||||
"tileOverlap": {
|
||||
"heading": "Tile Overlap",
|
||||
"paragraphs": [
|
||||
"Controls the overlap between adjacent tiles during upscaling. Higher overlap values help reduce visible seams between tiles but use more memory.",
|
||||
"The default value of 128 works well for most cases, but you can adjust based on your specific needs and memory constraints."
|
||||
]
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "Non-Commercial License",
|
||||
"paragraphs": [
|
||||
@@ -1926,6 +2001,7 @@
|
||||
"canvas": "Canvas",
|
||||
"bookmark": "Bookmark for Quick Switch",
|
||||
"fitBboxToLayers": "Fit Bbox To Layers",
|
||||
"fitBboxToMasks": "Fit Bbox To Masks",
|
||||
"removeBookmark": "Remove Bookmark",
|
||||
"saveCanvasToGallery": "Save Canvas to Gallery",
|
||||
"saveBboxToGallery": "Save Bbox to Gallery",
|
||||
@@ -1962,7 +2038,6 @@
|
||||
"recalculateRects": "Recalculate Rects",
|
||||
"clipToBbox": "Clip Strokes to Bbox",
|
||||
"outputOnlyMaskedRegions": "Output Only Generated Regions",
|
||||
"saveAllImagesToGallery": "Save All Images to Gallery",
|
||||
"addLayer": "Add Layer",
|
||||
"duplicate": "Duplicate",
|
||||
"moveToFront": "Move to Front",
|
||||
@@ -1991,6 +2066,7 @@
|
||||
"rasterLayer": "Raster Layer",
|
||||
"controlLayer": "Control Layer",
|
||||
"inpaintMask": "Inpaint Mask",
|
||||
"invertMask": "Invert Mask",
|
||||
"regionalGuidance": "Regional Guidance",
|
||||
"referenceImageRegional": "Reference Image (Regional)",
|
||||
"referenceImageGlobal": "Reference Image (Global)",
|
||||
@@ -1999,6 +2075,8 @@
|
||||
"asControlLayer": "As $t(controlLayers.controlLayer)",
|
||||
"asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
|
||||
"referenceImage": "Reference Image",
|
||||
"maxRefImages": "Max Ref Images",
|
||||
"useAsReferenceImage": "Use as Reference Image",
|
||||
"regionalReferenceImage": "Regional Reference Image",
|
||||
"globalReferenceImage": "Global Reference Image",
|
||||
"sendingToCanvas": "Staging Generations on Canvas",
|
||||
@@ -2087,9 +2165,9 @@
|
||||
"resetCanvasLayers": "Reset Canvas Layers",
|
||||
"resetGenerationSettings": "Reset Generation Settings",
|
||||
"replaceCurrent": "Replace Current",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
|
||||
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image to get started.",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
|
||||
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the gallery onto this Reference Image to get started.",
|
||||
"uploadOrDragAnImage": "Drag an image from the gallery or <UploadButton>upload an image</UploadButton>.",
|
||||
"imageNoise": "Image Noise",
|
||||
"denoiseLimit": "Denoise Limit",
|
||||
@@ -2107,7 +2185,8 @@
|
||||
"rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
|
||||
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
|
||||
"rgNoRegion": "no region drawn",
|
||||
"fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill"
|
||||
"fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill",
|
||||
"bboxHidden": "Bounding box is hidden (shift+o to toggle)"
|
||||
},
|
||||
"errors": {
|
||||
"unableToFindImage": "Unable to find image",
|
||||
@@ -2332,7 +2411,8 @@
|
||||
"alert": "Preserving Masked Region"
|
||||
},
|
||||
"saveAllImagesToGallery": {
|
||||
"alert": "Saving All Images to Gallery"
|
||||
"label": "Send New Generations to Gallery",
|
||||
"alert": "Sending new generations to Gallery, bypassing Canvas"
|
||||
},
|
||||
"isolatedStagingPreview": "Isolated Staging Preview",
|
||||
"isolatedPreview": "Isolated Preview",
|
||||
@@ -2396,6 +2476,9 @@
|
||||
"upscaleModel": "Upscale Model",
|
||||
"postProcessingModel": "Post-Processing Model",
|
||||
"scale": "Scale",
|
||||
"tileControl": "Tile Control",
|
||||
"tileSize": "Tile Size",
|
||||
"tileOverlap": "Tile Overlap",
|
||||
"postProcessingMissingModelWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install a post-processing (image to image) model.",
|
||||
"missingModelsWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install the required models:",
|
||||
"mainModelDesc": "Main model (SD1.5 or SDXL architecture)",
|
||||
@@ -2462,7 +2545,7 @@
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generation",
|
||||
"generate": "Generate",
|
||||
"canvas": "Canvas",
|
||||
"workflows": "Workflows",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
@@ -2473,6 +2556,12 @@
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||
"gallery": "Gallery"
|
||||
},
|
||||
"panels": {
|
||||
"launchpad": "Launchpad",
|
||||
"workflowEditor": "Workflow Editor",
|
||||
"imageViewer": "Image Viewer",
|
||||
"canvas": "Canvas"
|
||||
},
|
||||
"launchpad": {
|
||||
"workflowsTitle": "Go deep with Workflows.",
|
||||
"upscalingTitle": "Upscale and add detail.",
|
||||
@@ -2480,6 +2569,28 @@
|
||||
"generateTitle": "Generate images from text prompts.",
|
||||
"modelGuideText": "Want to learn what prompts work best for each model?",
|
||||
"modelGuideLink": "Check out our Model Guide.",
|
||||
"createNewWorkflowFromScratch": "Create a new Workflow from scratch",
|
||||
"browseAndLoadWorkflows": "Browse and load existing workflows",
|
||||
"addStyleRef": {
|
||||
"title": "Add a Style Reference",
|
||||
"description": "Add an image to transfer its look."
|
||||
},
|
||||
"editImage": {
|
||||
"title": "Edit Image",
|
||||
"description": "Add an image to refine."
|
||||
},
|
||||
"generateFromText": {
|
||||
"title": "Generate from Text",
|
||||
"description": "Enter a prompt and Invoke."
|
||||
},
|
||||
"useALayoutImage": {
|
||||
"title": "Use a Layout Image",
|
||||
"description": "Add an image to control composition."
|
||||
},
|
||||
"generate": {
|
||||
"canvasCalloutTitle": "Looking to get more control, edit, and iterate on your images?",
|
||||
"canvasCalloutLink": "Navigate to Canvas for more capabilities."
|
||||
},
|
||||
"workflows": {
|
||||
"description": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results.",
|
||||
"learnMoreLink": "Learn more about creating workflows",
|
||||
@@ -2516,6 +2627,13 @@
|
||||
"upscaleModel": "Upscale Model",
|
||||
"model": "Model",
|
||||
"scale": "Scale",
|
||||
"creativityAndStructure": {
|
||||
"title": "Creativity & Structure Defaults",
|
||||
"conservative": "Conservative",
|
||||
"balanced": "Balanced",
|
||||
"creative": "Creative",
|
||||
"artistic": "Artistic"
|
||||
},
|
||||
"helpText": {
|
||||
"promptAdvice": "When upscaling, use a prompt that describes the medium and style. Avoid describing specific content details in the image.",
|
||||
"styleAdvice": "Upscaling works best with the general style of your image."
|
||||
@@ -2560,9 +2678,8 @@
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "What's New in Invoke",
|
||||
"items": [
|
||||
"Generate images faster with new Launchpads and a simplified Generate tab.",
|
||||
"Edit with prompts using Flux Kontext Dev.",
|
||||
"Export to PSD, bulk-hide overlays, organize models & images — all in a reimagined interface built for control."
|
||||
"Misc QoL: Toggle Bbox visibility, highlight nodes with errors, prevent adding node fields to Builder form multiple times, CLIP Skip metadata recallable",
|
||||
"Reduced VRAM usage for multiple Kontext Ref images and VAE encoding"
|
||||
],
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchRecentReleaseVideos": "Watch Recent Release Videos",
|
||||
|
||||
@@ -399,7 +399,6 @@
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"queue": "Cola",
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
|
||||
@@ -1820,7 +1820,6 @@
|
||||
"upscaling": "Agrandissement",
|
||||
"gallery": "Galerie",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||
"generation": "Génération",
|
||||
"workflows": "Workflows",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modèles",
|
||||
@@ -2375,65 +2374,8 @@
|
||||
},
|
||||
"supportVideos": {
|
||||
"watch": "Regarder",
|
||||
"videos": {
|
||||
"upscaling": {
|
||||
"description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
|
||||
"title": "Upscaling"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"description": "Étapes pour générer et enregistrer des images dans la galerie.",
|
||||
"title": "Comment générer et enregistrer dans la galerie ?"
|
||||
},
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"title": "Utilisation des couche de contrôle et des guides de référence",
|
||||
"description": "Apprenez à guider la création de vos images avec des couche de contrôle et des images de référence."
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
|
||||
"title": "Exploration des modèles d'IA et des adaptateurs de concepts"
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Comment utiliser les réseaux de contrôle et les couches de contrôle ?",
|
||||
"description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
|
||||
"title": "Créer et composer sur le canvas de contrôle d'Invoke"
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"title": "Comment puis-je modifier sur la toile ?",
|
||||
"description": "Guide pour éditer des images directement sur la toile."
|
||||
},
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"title": "Comment effectuer une transformation d'image à image ?",
|
||||
"description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"title": "Comment utiliser les IP Adapters globaux et les images de référence ?",
|
||||
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Comment utiliser les masques d'inpainting ?",
|
||||
"description": "Comment appliquer des masques de retourche pour la correction et la variation d'image."
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"title": "Créer votre première image",
|
||||
"description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"title": "Comprendre l'Image-à-Image et le Débruitage",
|
||||
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"title": "Comment effectuer un outpainting ?",
|
||||
"description": "Guide pour l'extension au-delà des bordures de l'image originale."
|
||||
}
|
||||
},
|
||||
"gettingStarted": "Commencer",
|
||||
"studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
|
||||
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
|
||||
"supportVideos": "Vidéos d'assistance",
|
||||
"controlCanvas": "Contrôler la toile"
|
||||
"supportVideos": "Vidéos d'assistance"
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Effacer le cache du modèle",
|
||||
|
||||
@@ -128,7 +128,9 @@
|
||||
"search": "Cerca",
|
||||
"clear": "Cancella",
|
||||
"compactView": "Vista compatta",
|
||||
"fullView": "Vista completa"
|
||||
"fullView": "Vista completa",
|
||||
"removeNegativePrompt": "Rimuovi prompt negativo",
|
||||
"addNegativePrompt": "Aggiungi prompt negativo"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -152,7 +154,7 @@
|
||||
"image": "immagine",
|
||||
"drop": "Rilascia",
|
||||
"unstarImage": "Rimuovi contrassegno immagine",
|
||||
"dropOrUpload": "$t(gallery.drop) o carica",
|
||||
"dropOrUpload": "Rilascia o carica",
|
||||
"starImage": "Contrassegna l'immagine",
|
||||
"dropToUpload": "$t(gallery.drop) per aggiornare",
|
||||
"bulkDownloadRequested": "Preparazione del download",
|
||||
@@ -197,7 +199,8 @@
|
||||
"boardsSettings": "Impostazioni Bacheche",
|
||||
"imagesSettings": "Impostazioni Immagini Galleria",
|
||||
"assets": "Risorse",
|
||||
"images": "Immagini"
|
||||
"images": "Immagini",
|
||||
"useForPromptGeneration": "Usa per generare il prompt"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "Cerca tasti di scelta rapida",
|
||||
@@ -253,12 +256,16 @@
|
||||
"desc": "Attiva/disattiva il pannello destro."
|
||||
},
|
||||
"resetPanelLayout": {
|
||||
"title": "Ripristina il layout del pannello",
|
||||
"desc": "Ripristina le dimensioni e il layout predefiniti dei pannelli sinistro e destro."
|
||||
"title": "Ripristina lo schema del pannello",
|
||||
"desc": "Ripristina le dimensioni e lo schema predefiniti dei pannelli sinistro e destro."
|
||||
},
|
||||
"togglePanels": {
|
||||
"title": "Attiva/disattiva i pannelli",
|
||||
"desc": "Mostra o nascondi contemporaneamente i pannelli sinistro e destro."
|
||||
},
|
||||
"selectGenerateTab": {
|
||||
"title": "Seleziona la scheda Genera",
|
||||
"desc": "Seleziona la scheda Genera."
|
||||
}
|
||||
},
|
||||
"hotkeys": "Tasti di scelta rapida",
|
||||
@@ -379,6 +386,36 @@
|
||||
"applyTransform": {
|
||||
"title": "Applica trasformazione",
|
||||
"desc": "Applica la trasformazione in sospeso al livello selezionato."
|
||||
},
|
||||
"toggleNonRasterLayers": {
|
||||
"desc": "Mostra o nascondi tutte le categorie di livelli non raster (Livelli di controllo, Maschere di Inpaint, Guida regionale).",
|
||||
"title": "Attiva/disattiva livelli non raster"
|
||||
},
|
||||
"settings": {
|
||||
"behavior": "Comportamento",
|
||||
"display": "Mostra",
|
||||
"grid": "Griglia"
|
||||
},
|
||||
"invertMask": {
|
||||
"title": "Inverti maschera",
|
||||
"desc": "Inverte la maschera di inpaint selezionata, creando una nuova maschera con trasparenza opposta."
|
||||
},
|
||||
"fitBboxToMasks": {
|
||||
"title": "Adatta il riquadro di delimitazione alle maschere",
|
||||
"desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo alle maschere di inpaint visibili"
|
||||
},
|
||||
"applySegmentAnything": {
|
||||
"title": "Applica Segment Anything",
|
||||
"desc": "Applica la maschera Segment Anything corrente.",
|
||||
"key": "invio"
|
||||
},
|
||||
"cancelSegmentAnything": {
|
||||
"title": "Annulla Segment Anything",
|
||||
"desc": "Annulla l'operazione Segment Anything corrente."
|
||||
},
|
||||
"fitBboxToLayers": {
|
||||
"title": "Adatta il riquadro di delimitazione ai livelli",
|
||||
"desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo ai livelli visibili"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -508,6 +545,10 @@
|
||||
"galleryNavUpAlt": {
|
||||
"desc": "Uguale a Naviga verso l'alto, ma seleziona l'immagine da confrontare, aprendo la modalità di confronto se non è già aperta.",
|
||||
"title": "Naviga verso l'alto (Confronta immagine)"
|
||||
},
|
||||
"starImage": {
|
||||
"desc": "Aggiungi/Rimuovi contrassegno all'immagine selezionata.",
|
||||
"title": "Aggiungi / Rimuovi contrassegno immagine"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -623,7 +664,7 @@
|
||||
"installingXModels_one": "Installazione di {{count}} modello",
|
||||
"installingXModels_many": "Installazione di {{count}} modelli",
|
||||
"installingXModels_other": "Installazione di {{count}} modelli",
|
||||
"includesNModels": "Include {{n}} modelli e le loro dipendenze",
|
||||
"includesNModels": "Include {{n}} modelli e le loro dipendenze.",
|
||||
"starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
|
||||
"noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
|
||||
"defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",
|
||||
@@ -656,7 +697,27 @@
|
||||
"manageModels": "Gestione modelli",
|
||||
"hfTokenReset": "Ripristino del gettone HF",
|
||||
"relatedModels": "Modelli correlati",
|
||||
"showOnlyRelatedModels": "Correlati"
|
||||
"showOnlyRelatedModels": "Correlati",
|
||||
"installedModelsCount": "{{installed}} di {{total}} modelli installati.",
|
||||
"allNModelsInstalled": "Tutti i {{count}} modelli installati",
|
||||
"nToInstall": "{{count}} da installare",
|
||||
"nAlreadyInstalled": "{{count}} già installati",
|
||||
"bundleAlreadyInstalled": "Pacchetto già installato",
|
||||
"bundleAlreadyInstalledDesc": "Tutti i modelli nel pacchetto {{bundleName}} sono già installati.",
|
||||
"launchpad": {
|
||||
"description": "Per utilizzare la maggior parte delle funzionalità della piattaforma, Invoke richiede l'installazione di modelli. Scegli tra le opzioni di installazione manuale o esplora i modelli di avvio selezionati.",
|
||||
"manualInstall": "Installazione manuale",
|
||||
"urlDescription": "Installa i modelli da un URL o da un percorso file locale. Perfetto per modelli specifici che desideri aggiungere.",
|
||||
"huggingFaceDescription": "Esplora e installa i modelli direttamente dai repository di HuggingFace.",
|
||||
"scanFolderDescription": "Esegui la scansione di una cartella locale per rilevare e installare automaticamente i modelli.",
|
||||
"recommendedModels": "Modelli consigliati",
|
||||
"exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
|
||||
"welcome": "Benvenuti in Gestione Modelli",
|
||||
"quickStart": "Pacchetti di avvio rapido",
|
||||
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
|
||||
"browseAll": "Oppure scopri tutti i modelli disponibili:"
|
||||
},
|
||||
"launchpadTab": "Rampa di lancio"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -742,7 +803,10 @@
|
||||
"modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
|
||||
"fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con FLUX Kontext tramite BFL API",
|
||||
"promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
|
||||
"promptExpansionPending": "Espansione del prompt in corso"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
@@ -884,7 +948,34 @@
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
|
||||
"noRasterLayers": "Nessun livello raster trovato",
|
||||
"noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
|
||||
"noActiveRasterLayers": "Nessun livello raster attivo",
|
||||
"noActiveRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"noVisibleRasterLayers": "Nessun livello raster visibile",
|
||||
"noVisibleRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"invalidCanvasDimensions": "Dimensioni della tela non valide",
|
||||
"canvasTooLarge": "Tela troppo grande",
|
||||
"canvasTooLargeDesc": "Le dimensioni della tela superano le dimensioni massime consentite per l'esportazione in formato PSD. Riduci la larghezza e l'altezza totali della tela e riprova.",
|
||||
"failedToProcessLayers": "Impossibile elaborare i livelli",
|
||||
"psdExportSuccess": "Esportazione PSD completata",
|
||||
"psdExportSuccessDesc": "Esportazione riuscita di {{count}} livelli nel file PSD",
|
||||
"problemExportingPSD": "Problema durante l'esportazione PSD",
|
||||
"noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
|
||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext non supporta la generazione di immagini posizionate sulla tela. Riprova utilizzando la sezione Immagine di riferimento e disattiva tutti i livelli raster.",
|
||||
"canvasManagerNotAvailable": "Gestione tela non disponibile",
|
||||
"promptExpansionFailed": "Abbiamo riscontrato un problema. Riprova a eseguire l'espansione del prompt.",
|
||||
"uploadAndPromptGenerationFailed": "Impossibile caricare l'immagine e generare il prompt",
|
||||
"promptGenerationStarted": "Generazione del prompt avviata",
|
||||
"invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide",
|
||||
"invalidBbox": "Riquadro di delimitazione non valido",
|
||||
"noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
|
||||
"noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
|
||||
"noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
|
||||
"noVisibleMasks": "Nessuna maschera visibile",
|
||||
"maskInvertFailed": "Impossibile invertire la maschera",
|
||||
"maskInverted": "Maschera invertita"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1079,7 +1170,22 @@
|
||||
"missingField_withName": "Campo \"{{name}}\" mancante",
|
||||
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
|
||||
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante",
|
||||
"layout": {
|
||||
"alignmentDR": "In basso a destra",
|
||||
"autoLayout": "Schema automatico",
|
||||
"nodeSpacing": "Spaziatura nodi",
|
||||
"layerSpacing": "Spaziatura livelli",
|
||||
"layeringStrategy": "Strategia livelli",
|
||||
"longestPath": "Percorso più lungo",
|
||||
"layoutDirection": "Direzione schema",
|
||||
"layoutDirectionRight": "A destra",
|
||||
"layoutDirectionDown": "In basso",
|
||||
"alignment": "Allineamento nodi",
|
||||
"alignmentUL": "In alto a sinistra",
|
||||
"alignmentDL": "In basso a sinistra",
|
||||
"alignmentUR": "In alto a destra"
|
||||
}
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -1156,7 +1262,7 @@
|
||||
"batchQueuedDesc_other": "Aggiunte {{count}} sessioni a {{direction}} della coda",
|
||||
"graphQueued": "Grafico in coda",
|
||||
"batch": "Lotto",
|
||||
"clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati.",
|
||||
"clearQueueAlertDialog": "La cancellazione della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati e l'area di lavoro della Tela verrà reimpostata.",
|
||||
"pending": "In attesa",
|
||||
"completedIn": "Completato in",
|
||||
"resumeFailed": "Problema nel riavvio dell'elaborazione",
|
||||
@@ -1212,7 +1318,8 @@
|
||||
"retrySucceeded": "Elemento rieseguito",
|
||||
"retryItem": "Riesegui elemento",
|
||||
"retryFailed": "Problema riesecuzione elemento",
|
||||
"credits": "Crediti"
|
||||
"credits": "Crediti",
|
||||
"cancelAllExceptCurrent": "Annulla tutto tranne quello corrente"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1225,7 +1332,8 @@
|
||||
"addLora": "Aggiungi LoRA",
|
||||
"defaultVAE": "VAE predefinito",
|
||||
"concepts": "Concetti",
|
||||
"lora": "LoRA"
|
||||
"lora": "LoRA",
|
||||
"noCompatibleLoRAs": "Nessun LoRA compatibile"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Disabilita",
|
||||
@@ -1626,7 +1734,7 @@
|
||||
"structure": {
|
||||
"heading": "Struttura",
|
||||
"paragraphs": [
|
||||
"La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Una struttura bassa permette cambiamenti significativi, mentre una struttura alta conserva la composizione e il layout originali."
|
||||
"La struttura determina quanto l'immagine finale rispecchierà lo schema dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
|
||||
]
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
@@ -1683,6 +1791,20 @@
|
||||
"paragraphs": [
|
||||
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
|
||||
]
|
||||
},
|
||||
"tileSize": {
|
||||
"heading": "Dimensione riquadro",
|
||||
"paragraphs": [
|
||||
"Controlla la dimensione dei riquadri utilizzati durante il processo di ampliamento. Riquadri più grandi consumano più memoria, ma possono produrre risultati migliori.",
|
||||
"I modelli SD1.5 hanno un valore predefinito di 768, mentre i modelli SDXL hanno un valore predefinito di 1024. Ridurre le dimensioni dei riquadri in caso di problemi di memoria."
|
||||
]
|
||||
},
|
||||
"tileOverlap": {
|
||||
"heading": "Sovrapposizione riquadri",
|
||||
"paragraphs": [
|
||||
"Controlla la sovrapposizione tra riquadri adiacenti durante l'ampliamento. Valori di sovrapposizione più elevati aiutano a ridurre le giunzioni visibili tra i riquadri, ma consuma più memoria.",
|
||||
"Il valore predefinito di 128 è adatto alla maggior parte dei casi, ma è possibile modificarlo in base alle proprie esigenze specifiche e ai limiti di memoria."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1730,7 +1852,7 @@
|
||||
"parameterSet": "Parametro {{parameter}} impostato",
|
||||
"parsingFailed": "Analisi non riuscita",
|
||||
"recallParameter": "Richiama {{label}}",
|
||||
"canvasV2Metadata": "Tela",
|
||||
"canvasV2Metadata": "Livelli Tela",
|
||||
"guidance": "Guida",
|
||||
"seamlessXAxis": "Asse X senza giunte",
|
||||
"seamlessYAxis": "Asse Y senza giunte",
|
||||
@@ -1778,7 +1900,7 @@
|
||||
"opened": "Aperto",
|
||||
"convertGraph": "Converti grafico",
|
||||
"loadWorkflow": "$t(common.load) Flusso di lavoro",
|
||||
"autoLayout": "Disposizione automatica",
|
||||
"autoLayout": "Schema automatico",
|
||||
"loadFromGraph": "Carica il flusso di lavoro dal grafico",
|
||||
"userWorkflows": "Flussi di lavoro utente",
|
||||
"projectWorkflows": "Flussi di lavoro del progetto",
|
||||
@@ -1901,7 +2023,16 @@
|
||||
"prompt": {
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente",
|
||||
"discard": "Scarta",
|
||||
"insert": "Inserisci",
|
||||
"replace": "Sostituisci",
|
||||
"resultSubtitle": "Scegli come gestire il prompt espanso:",
|
||||
"resultTitle": "Espansione del prompt completata",
|
||||
"expandingPrompt": "Espansione del prompt...",
|
||||
"uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
|
||||
"expandCurrentPrompt": "Espandi il prompt corrente",
|
||||
"generateFromImage": "Genera prompt dall'immagine"
|
||||
},
|
||||
"controlLayers": {
|
||||
"addLayer": "Aggiungi Livello",
|
||||
@@ -2212,7 +2343,11 @@
|
||||
"label": "Preserva la regione mascherata"
|
||||
},
|
||||
"isolatedLayerPreview": "Anteprima livello isolato",
|
||||
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
|
||||
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione.",
|
||||
"saveAllImagesToGallery": {
|
||||
"alert": "Invia le nuove generazioni alla Galleria, bypassando la Tela",
|
||||
"label": "Invia le nuove generazioni alla Galleria"
|
||||
}
|
||||
},
|
||||
"transform": {
|
||||
"reset": "Reimposta",
|
||||
@@ -2262,7 +2397,8 @@
|
||||
"newRegionalGuidance": "Nuova Guida Regionale",
|
||||
"copyToClipboard": "Copia negli appunti",
|
||||
"copyCanvasToClipboard": "Copia la tela negli appunti",
|
||||
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
|
||||
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti",
|
||||
"newResizedControlLayer": "Nuovo livello di controllo ridimensionato"
|
||||
},
|
||||
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
|
||||
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
|
||||
@@ -2299,10 +2435,10 @@
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"useImage": "Usa immagine",
|
||||
"resetGenerationSettings": "Ripristina impostazioni di generazione",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla galleria su questa Immagine di riferimento.",
|
||||
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
|
||||
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
|
||||
"asControlLayer": "Come $t(controlLayers.controlLayer)",
|
||||
@@ -2352,11 +2488,25 @@
|
||||
"denoiseLimit": "Limite di riduzione del rumore",
|
||||
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
|
||||
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Rumore dell'immagine"
|
||||
"imageNoise": "Rumore dell'immagine",
|
||||
"exportCanvasToPSD": "Esporta la tela in PSD",
|
||||
"ruleOfThirds": "Mostra la regola dei terzi",
|
||||
"showNonRasterLayers": "Mostra livelli non raster (Shift+H)",
|
||||
"hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
|
||||
"uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
|
||||
"autoSwitch": {
|
||||
"switchOnStart": "All'inizio",
|
||||
"switchOnFinish": "Alla fine",
|
||||
"off": "Spento"
|
||||
},
|
||||
"invertMask": "Inverti maschera",
|
||||
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere",
|
||||
"maxRefImages": "Max Immagini di rif.to",
|
||||
"useAsReferenceImage": "Usa come immagine di riferimento"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generazione",
|
||||
"canvas": "Tela",
|
||||
"workflows": "Flussi di lavoro",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
@@ -2365,7 +2515,92 @@
|
||||
"queue": "Coda",
|
||||
"upscaling": "Amplia",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||
"gallery": "Galleria"
|
||||
"gallery": "Galleria",
|
||||
"generate": "Genera"
|
||||
},
|
||||
"launchpad": {
|
||||
"workflowsTitle": "Approfondisci i flussi di lavoro.",
|
||||
"upscalingTitle": "Amplia e aggiungi dettagli.",
|
||||
"canvasTitle": "Modifica e perfeziona sulla tela.",
|
||||
"generateTitle": "Genera immagini da prompt testuali.",
|
||||
"modelGuideText": "Vuoi scoprire quali prompt funzionano meglio per ciascun modello?",
|
||||
"modelGuideLink": "Consulta la nostra guida ai modelli.",
|
||||
"workflows": {
|
||||
"description": "I flussi di lavoro sono modelli riutilizzabili che automatizzano le attività di generazione delle immagini, consentendo di eseguire rapidamente operazioni complesse e di ottenere risultati coerenti.",
|
||||
"learnMoreLink": "Scopri di più sulla creazione di flussi di lavoro",
|
||||
"browseTemplates": {
|
||||
"title": "Sfoglia i modelli di flusso di lavoro",
|
||||
"description": "Scegli tra flussi di lavoro predefiniti per le attività comuni"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "Crea un nuovo flusso di lavoro",
|
||||
"description": "Avvia un nuovo flusso di lavoro da zero"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "Carica flusso di lavoro da file",
|
||||
"description": "Carica un flusso di lavoro per iniziare con una configurazione esistente"
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
"uploadImage": {
|
||||
"title": "Carica l'immagine da ampliare",
|
||||
"description": "Fai clic o trascina un'immagine per ingrandirla (JPG, PNG, WebP fino a 100 MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "Sostituisci l'immagine corrente",
|
||||
"description": "Fai clic o trascina una nuova immagine per sostituire quella corrente"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "Immagine pronta",
|
||||
"description": "Premere Invoke per iniziare l'ampliamento"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "Pronto per ampliare!",
|
||||
"description": "Configura le impostazioni qui sotto, quindi fai clic sul pulsante Invoke per iniziare ad ampliare l'immagine."
|
||||
},
|
||||
"upscaleModel": "Modello per l'ampliamento",
|
||||
"model": "Modello",
|
||||
"scale": "Scala",
|
||||
"helpText": {
|
||||
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
|
||||
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
|
||||
},
|
||||
"creativityAndStructure": {
|
||||
"title": "Creatività e struttura predefinite",
|
||||
"conservative": "Conservativo",
|
||||
"balanced": "Bilanciato",
|
||||
"creative": "Creativo",
|
||||
"artistic": "Artistico"
|
||||
}
|
||||
},
|
||||
"createNewWorkflowFromScratch": "Crea un nuovo flusso di lavoro da zero",
|
||||
"browseAndLoadWorkflows": "Sfoglia e carica i flussi di lavoro esistenti",
|
||||
"addStyleRef": {
|
||||
"title": "Aggiungi un riferimento di stile",
|
||||
"description": "Aggiungi un'immagine per trasferirne l'aspetto."
|
||||
},
|
||||
"editImage": {
|
||||
"title": "Modifica immagine",
|
||||
"description": "Aggiungi un'immagine da perfezionare."
|
||||
},
|
||||
"generateFromText": {
|
||||
"title": "Genera da testo",
|
||||
"description": "Inserisci un prompt e genera."
|
||||
},
|
||||
"useALayoutImage": {
|
||||
"description": "Aggiungi un'immagine per controllare la composizione.",
|
||||
"title": "Usa una immagine guida"
|
||||
},
|
||||
"generate": {
|
||||
"canvasCalloutTitle": "Vuoi avere più controllo, modificare e affinare le tue immagini?",
|
||||
"canvasCalloutLink": "Per ulteriori funzionalità, vai su Tela."
|
||||
}
|
||||
},
|
||||
"panels": {
|
||||
"launchpad": "Rampa di lancio",
|
||||
"workflowEditor": "Editor del flusso di lavoro",
|
||||
"imageViewer": "Visualizzatore immagini",
|
||||
"canvas": "Tela"
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
@@ -2386,7 +2621,10 @@
|
||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
||||
"upscale": "Amplia",
|
||||
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento.",
|
||||
"tileControl": "Controllo del riquadro",
|
||||
"tileSize": "Dimensione del riquadro",
|
||||
"tileOverlap": "Sovrapposizione riquadro"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invita collaboratori",
|
||||
@@ -2436,7 +2674,8 @@
|
||||
"positivePromptColumn": "'prompt' o 'positive_prompt'",
|
||||
"noTemplates": "Nessun modello",
|
||||
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
|
||||
"promptTemplateCleared": "Modello di prompt cancellato"
|
||||
"promptTemplateCleared": "Modello di prompt cancellato",
|
||||
"togglePromptPreviews": "Attiva/disattiva le anteprime dei prompt"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
|
||||
@@ -2452,8 +2691,8 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
"Lo stato dello studio viene salvato sul server, consentendoti di continuare a lavorare su qualsiasi dispositivo.",
|
||||
"Supporto per più immagini di riferimento per FLUX Kontext (solo modello locale)."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
@@ -2485,64 +2724,18 @@
|
||||
"supportVideos": {
|
||||
"gettingStarted": "Iniziare",
|
||||
"supportVideos": "Video di supporto",
|
||||
"videos": {
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"title": "Utilizzo di livelli di controllo e guide di riferimento",
|
||||
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
|
||||
"title": "Creazione della tua prima immagine"
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
|
||||
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
|
||||
},
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
|
||||
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Come si usano le maschere Inpaint?",
|
||||
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
|
||||
"title": "Come posso eseguire l'outpainting?"
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
|
||||
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
|
||||
},
|
||||
"upscaling": {
|
||||
"title": "Ampliamento",
|
||||
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
|
||||
"title": "Creare e comporre sulla tela di controllo di Invoke"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"description": "Passaggi per generare e salvare le immagini nella galleria.",
|
||||
"title": "Come posso generare e salvare nella Galleria?"
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"title": "Come posso apportare modifiche sulla tela?",
|
||||
"description": "Guida alla modifica delle immagini direttamente sulla tela."
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
|
||||
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
|
||||
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
|
||||
}
|
||||
},
|
||||
"controlCanvas": "Tela di Controllo",
|
||||
"watch": "Guarda",
|
||||
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
|
||||
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
|
||||
"studioSessionsDesc": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e porre domande. Le sessioni vengono caricate nella playlist la settimana successiva.",
|
||||
"videos": {
|
||||
"gettingStarted": {
|
||||
"title": "Introduzione a Invoke",
|
||||
"description": "Serie video completa che copre tutto ciò che devi sapere per iniziare a usare Invoke, dalla creazione della tua prima immagine alle tecniche avanzate."
|
||||
},
|
||||
"studioSessions": {
|
||||
"title": "Sessioni in studio",
|
||||
"description": "Sessioni approfondite che esplorano le funzionalità avanzate di Invoke, i flussi di lavoro creativi e le discussioni della community."
|
||||
}
|
||||
}
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Cancella la cache del modello",
|
||||
|
||||
@@ -141,7 +141,7 @@
|
||||
"loading": "ロード中",
|
||||
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
|
||||
"drop": "ドロップ",
|
||||
"dropOrUpload": "$t(gallery.drop) またはアップロード",
|
||||
"dropOrUpload": "ドロップまたはアップロード",
|
||||
"deleteImage_other": "画像 {{count}} 枚を削除",
|
||||
"deleteImagePermanent": "削除された画像は復元できません。",
|
||||
"download": "ダウンロード",
|
||||
@@ -193,7 +193,8 @@
|
||||
"images": "画像",
|
||||
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
|
||||
"imagesTab": "Invoke内で作成および保存された画像。",
|
||||
"assets": "アセット"
|
||||
"assets": "アセット",
|
||||
"useForPromptGeneration": "プロンプト生成に使用する"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "ホットキーを検索",
|
||||
@@ -363,6 +364,16 @@
|
||||
"selectRectTool": {
|
||||
"title": "矩形ツール",
|
||||
"desc": "矩形ツールを選択します。"
|
||||
},
|
||||
"settings": {
|
||||
"behavior": "行動",
|
||||
"display": "ディスプレイ",
|
||||
"grid": "グリッド",
|
||||
"debug": "デバッグ"
|
||||
},
|
||||
"toggleNonRasterLayers": {
|
||||
"title": "非ラスターレイヤーの切り替え",
|
||||
"desc": "ラスター以外のレイヤー カテゴリ (コントロール レイヤー、インペイント マスク、地域ガイダンス) を表示または非表示にします。"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -630,7 +641,7 @@
|
||||
"restoreDefaultSettings": "クリックするとモデルのデフォルト設定が使用されます.",
|
||||
"hfTokenSaved": "ハギングフェイストークンを保存しました",
|
||||
"imageEncoderModelId": "画像エンコーダーモデルID",
|
||||
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
|
||||
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます。",
|
||||
"learnMoreAboutSupportedModels": "私たちのサポートしているモデルについて更に学ぶ",
|
||||
"modelImageUpdateFailed": "モデル画像アップデート失敗",
|
||||
"scanFolder": "スキャンフォルダ",
|
||||
@@ -654,7 +665,30 @@
|
||||
"manageModels": "モデル管理",
|
||||
"hfTokenReset": "ハギングフェイストークンリセット",
|
||||
"relatedModels": "関連のあるモデル",
|
||||
"showOnlyRelatedModels": "関連している"
|
||||
"showOnlyRelatedModels": "関連している",
|
||||
"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
|
||||
"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
|
||||
"nToInstall": "{{count}}個をインストールする",
|
||||
"nAlreadyInstalled": "{{count}} 個すでにインストールされています",
|
||||
"bundleAlreadyInstalled": "バンドルがすでにインストールされています",
|
||||
"bundleAlreadyInstalledDesc": "{{bundleName}} バンドル内のすべてのモデルはすでにインストールされています。",
|
||||
"launchpadTab": "ランチパッド",
|
||||
"launchpad": {
|
||||
"welcome": "モデルマネジメントへようこそ",
|
||||
"description": "Invoke プラットフォームのほとんどの機能を利用するには、モデルのインストールが必要です。手動インストールオプションから選択するか、厳選されたスターターモデルをご覧ください。",
|
||||
"manualInstall": "マニュアルインストール",
|
||||
"urlDescription": "URLまたはローカルファイルパスからモデルをインストールします。特定のモデルを追加したい場合に最適です。",
|
||||
"huggingFaceDescription": "HuggingFace リポジトリからモデルを直接参照してインストールします。",
|
||||
"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
|
||||
"recommendedModels": "推奨モデル",
|
||||
"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
|
||||
"quickStart": "クイックスタートバンドル",
|
||||
"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
|
||||
"browseAll": "または、利用可能なすべてのモデルを参照してください。",
|
||||
"stableDiffusion15": "Stable Diffusion1.5",
|
||||
"sdxl": "SDXL",
|
||||
"fluxDev": "FLUX.1 dev"
|
||||
}
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -720,7 +754,10 @@
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
|
||||
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
|
||||
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
|
||||
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
|
||||
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
|
||||
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
|
||||
"promptExpansionPending": "プロンプト拡張が進行中",
|
||||
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
|
||||
},
|
||||
"aspect": "縦横比",
|
||||
"lockAspectRatio": "縦横比を固定",
|
||||
@@ -875,7 +912,26 @@
|
||||
"imageNotLoadedDesc": "画像を見つけられません",
|
||||
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
|
||||
"noRasterLayers": "ラスターレイヤーが見つかりません",
|
||||
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
|
||||
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
|
||||
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
|
||||
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
|
||||
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
|
||||
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
|
||||
"canvasTooLarge": "キャンバスが大きすぎます",
|
||||
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
|
||||
"failedToProcessLayers": "レイヤーの処理に失敗しました",
|
||||
"psdExportSuccess": "PSDエクスポート完了",
|
||||
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
|
||||
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
|
||||
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
|
||||
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
|
||||
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
|
||||
"promptGenerationStarted": "プロンプト生成が開始されました",
|
||||
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
|
||||
"promptExpansionFailed": "プロンプト拡張に失敗しました"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -1014,7 +1070,8 @@
|
||||
"lora": "LoRA",
|
||||
"defaultVAE": "デフォルトVAE",
|
||||
"noLoRAsInstalled": "インストールされているLoRAはありません",
|
||||
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません"
|
||||
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
|
||||
"noCompatibleLoRAs": "互換性のあるLoRAはありません"
|
||||
},
|
||||
"nodes": {
|
||||
"addNode": "ノードを追加",
|
||||
@@ -1708,7 +1765,16 @@
|
||||
"prompt": {
|
||||
"addPromptTrigger": "プロンプトトリガーを追加",
|
||||
"compatibleEmbeddings": "互換性のある埋め込み",
|
||||
"noMatchingTriggers": "一致するトリガーがありません"
|
||||
"noMatchingTriggers": "一致するトリガーがありません",
|
||||
"generateFromImage": "画像からプロンプトを生成する",
|
||||
"expandCurrentPrompt": "現在のプロンプトを展開",
|
||||
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
|
||||
"expandingPrompt": "プロンプトを展開しています...",
|
||||
"resultTitle": "プロンプト拡張完了",
|
||||
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
|
||||
"replace": "交換する",
|
||||
"insert": "挿入する",
|
||||
"discard": "破棄する"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -1716,7 +1782,60 @@
|
||||
"canvas": "キャンバス",
|
||||
"workflows": "ワークフロー",
|
||||
"models": "モデル",
|
||||
"gallery": "ギャラリー"
|
||||
"gallery": "ギャラリー",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"upscaling": "アップスケーリング",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
|
||||
},
|
||||
"launchpad": {
|
||||
"upscaling": {
|
||||
"model": "モデル",
|
||||
"scale": "スケール",
|
||||
"helpText": {
|
||||
"promptAdvice": "アップスケールする際は、媒体とスタイルを説明するプロンプトを使用してください。画像内の具体的なコンテンツの詳細を説明することは避けてください。",
|
||||
"styleAdvice": "アップスケーリングは、画像の全体的なスタイルに最適です。"
|
||||
},
|
||||
"uploadImage": {
|
||||
"title": "アップスケール用の画像をアップロードする",
|
||||
"description": "アップスケールするには、画像をクリックまたはドラッグします(JPG、PNG、WebP、最大100MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "現在の画像を置き換える",
|
||||
"description": "新しい画像をクリックまたはドラッグして、現在の画像を置き換えます"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "画像準備完了",
|
||||
"description": "アップスケールを開始するにはInvokeを押してください"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "アップスケールの準備ができました!",
|
||||
"description": "以下の設定を構成し、「Invoke」ボタンをクリックして画像のアップスケールを開始します。"
|
||||
},
|
||||
"upscaleModel": "アップスケールモデル"
|
||||
},
|
||||
"workflowsTitle": "ワークフローを詳しく見てみましょう。",
|
||||
"upscalingTitle": "アップスケールして詳細を追加します。",
|
||||
"canvasTitle": "キャンバス上で編集および調整します。",
|
||||
"generateTitle": "テキストプロンプトから画像を生成します。",
|
||||
"modelGuideText": "各モデルに最適なプロンプトを知りたいですか?",
|
||||
"modelGuideLink": "モデルガイドをご覧ください。",
|
||||
"workflows": {
|
||||
"description": "ワークフローは、画像生成タスクを自動化する再利用可能なテンプレートであり、複雑な操作を迅速に実行して一貫した結果を得ることができます。",
|
||||
"learnMoreLink": "ワークフローの作成について詳しく見る",
|
||||
"browseTemplates": {
|
||||
"title": "ワークフローテンプレートを参照する",
|
||||
"description": "一般的なタスク用にあらかじめ構築されたワークフローから選択する"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "新規ワークフローを作成する",
|
||||
"description": "新しいワークフローをゼロから始める"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "ファイルからワークフローを読み込む",
|
||||
"description": "既存の設定から開始するためのワークフローをアップロードする"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
@@ -1732,7 +1851,16 @@
|
||||
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
||||
"newGlobalReferenceImage": "新規全域参照画像",
|
||||
"newRegionalReferenceImage": "新規領域参照画像",
|
||||
"canvasGroup": "キャンバス"
|
||||
"canvasGroup": "キャンバス",
|
||||
"saveToGalleryGroup": "ギャラリーに保存",
|
||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
||||
"saveBboxToGallery": "Bボックスをギャラリーに保存",
|
||||
"newControlLayer": "新規コントロールレイヤー",
|
||||
"newRasterLayer": "新規ラスターレイヤー",
|
||||
"newInpaintMask": "新規インペイントマスク",
|
||||
"copyToClipboard": "クリップボードにコピー",
|
||||
"copyCanvasToClipboard": "キャンバスをクリップボードにコピー",
|
||||
"copyBboxToClipboard": "Bボックスをクリップボードにコピー"
|
||||
},
|
||||
"regionalGuidance": "領域ガイダンス",
|
||||
"globalReferenceImage": "全域参照画像",
|
||||
@@ -1743,7 +1871,11 @@
|
||||
"transform": "変形",
|
||||
"apply": "適用",
|
||||
"cancel": "キャンセル",
|
||||
"reset": "リセット"
|
||||
"reset": "リセット",
|
||||
"fitMode": "フィットモード",
|
||||
"fitModeContain": "含む",
|
||||
"fitModeCover": "カバー",
|
||||
"fitModeFill": "満たす"
|
||||
},
|
||||
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
|
||||
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
|
||||
@@ -1754,7 +1886,8 @@
|
||||
"rectangle": "矩形",
|
||||
"move": "移動",
|
||||
"eraser": "消しゴム",
|
||||
"bbox": "Bbox"
|
||||
"bbox": "Bbox",
|
||||
"view": "ビュー"
|
||||
},
|
||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
||||
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
|
||||
@@ -1774,25 +1907,386 @@
|
||||
"removeBookmark": "ブックマークを外す",
|
||||
"savedToGalleryOk": "ギャラリーに保存しました",
|
||||
"controlMode": {
|
||||
"prompt": "プロンプト"
|
||||
"prompt": "プロンプト",
|
||||
"controlMode": "コントロールモード",
|
||||
"balanced": "バランス(推奨)",
|
||||
"control": "コントロール",
|
||||
"megaControl": "メガコントロール"
|
||||
},
|
||||
"prompt": "プロンプト",
|
||||
"settings": {
|
||||
"snapToGrid": {
|
||||
"off": "オフ",
|
||||
"on": "オン"
|
||||
}
|
||||
"on": "オン",
|
||||
"label": "グリッドにスナップ"
|
||||
},
|
||||
"preserveMask": {
|
||||
"label": "マスクされた領域を保持",
|
||||
"alert": "マスクされた領域の保存"
|
||||
},
|
||||
"isolatedStagingPreview": "分離されたステージングプレビュー",
|
||||
"isolatedPreview": "分離されたプレビュー",
|
||||
"isolatedLayerPreview": "分離されたレイヤーのプレビュー",
|
||||
"isolatedLayerPreviewDesc": "フィルタリングや変換などの操作を実行するときに、このレイヤーのみを表示するかどうか。",
|
||||
"invertBrushSizeScrollDirection": "ブラシサイズのスクロール反転",
|
||||
"pressureSensitivity": "圧力感度"
|
||||
},
|
||||
"filter": {
|
||||
"filter": "フィルター",
|
||||
"spandrel_filter": {
|
||||
"model": "モデル"
|
||||
"model": "モデル",
|
||||
"label": "img2imgモデル",
|
||||
"description": "選択したレイヤーでimg2imgモデルを実行します。",
|
||||
"autoScale": "オートスケール",
|
||||
"autoScaleDesc": "選択したモデルは、目標スケールに達するまで実行されます。",
|
||||
"scale": "ターゲットスケール"
|
||||
},
|
||||
"apply": "適用",
|
||||
"reset": "リセット",
|
||||
"cancel": "キャンセル"
|
||||
"cancel": "キャンセル",
|
||||
"filters": "フィルター",
|
||||
"filterType": "フィルタータイプ",
|
||||
"autoProcess": "オートプロセス",
|
||||
"process": "プロセス",
|
||||
"advanced": "アドバンスド",
|
||||
"processingLayerWith": "{{type}} フィルターを使用した処理レイヤー。",
|
||||
"forMoreControl": "さらに細かく制御するには、以下の「詳細設定」をクリックしてください。",
|
||||
"canny_edge_detection": {
|
||||
"label": "キャニーエッジ検出",
|
||||
"description": "Canny エッジ検出アルゴリズムを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"low_threshold": "低閾値",
|
||||
"high_threshold": "高閾値"
|
||||
},
|
||||
"color_map": {
|
||||
"label": "カラーマップ",
|
||||
"description": "選択したレイヤーからカラーマップを作成します。",
|
||||
"tile_size": "タイルサイズ"
|
||||
},
|
||||
"content_shuffle": {
|
||||
"label": "コンテンツシャッフル",
|
||||
"description": "選択したレイヤーのコンテンツを、「液化」効果と同様にシャッフルします。",
|
||||
"scale_factor": "スケール係数"
|
||||
},
|
||||
"depth_anything_depth_estimation": {
|
||||
"label": "デプスエニシング",
|
||||
"description": "デプスエニシングモデルを使用して、選択したレイヤーから深度マップを生成します。",
|
||||
"model_size": "モデルサイズ",
|
||||
"model_size_small": "スモール",
|
||||
"model_size_small_v2": "スモールv2",
|
||||
"model_size_base": "ベース",
|
||||
"model_size_large": "ラージ"
|
||||
},
|
||||
"dw_openpose_detection": {
|
||||
"label": "DW オープンポーズ検出",
|
||||
"description": "DW Openpose モデルを使用して、選択したレイヤー内の人間のポーズを検出します。",
|
||||
"draw_hands": "手を描く",
|
||||
"draw_face": "顔を描く",
|
||||
"draw_body": "体を描く"
|
||||
},
|
||||
"hed_edge_detection": {
|
||||
"label": "HEDエッジ検出",
|
||||
"description": "HED エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"scribble": "落書き"
|
||||
},
|
||||
"lineart_anime_edge_detection": {
|
||||
"label": "線画アニメのエッジ検出",
|
||||
"description": "線画アニメエッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。"
|
||||
},
|
||||
"lineart_edge_detection": {
|
||||
"label": "線画エッジ検出",
|
||||
"description": "線画エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"coarse": "粗い"
|
||||
},
|
||||
"mediapipe_face_detection": {
|
||||
"label": "メディアパイプ顔検出",
|
||||
"description": "メディアパイプ顔検出モデルを使用して、選択したレイヤー内の顔を検出します。",
|
||||
"max_faces": "マックスフェイス",
|
||||
"min_confidence": "最小信頼度"
|
||||
},
|
||||
"mlsd_detection": {
|
||||
"label": "線分検出",
|
||||
"description": "MLSD 線分検出モデルを使用して、選択したレイヤーから線分マップを生成します。",
|
||||
"score_threshold": "スコア閾値",
|
||||
"distance_threshold": "距離閾値"
|
||||
},
|
||||
"normal_map": {
|
||||
"label": "ノーマルマップ",
|
||||
"description": "選択したレイヤーからノーマルマップを生成します。"
|
||||
},
|
||||
"pidi_edge_detection": {
|
||||
"label": "PiDiNetエッジ検出",
|
||||
"description": "PiDiNet エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
|
||||
"scribble": "落書き",
|
||||
"quantize_edges": "エッジを量子化する"
|
||||
},
|
||||
"img_blur": {
|
||||
"label": "画像をぼかす",
|
||||
"description": "選択したレイヤーをぼかします。",
|
||||
"blur_type": "ぼかしの種類",
|
||||
"blur_radius": "半径",
|
||||
"gaussian_type": "ガウス分布",
|
||||
"box_type": "ボックス"
|
||||
},
|
||||
"img_noise": {
|
||||
"label": "ノイズ画像",
|
||||
"description": "選択したレイヤーにノイズを追加します。",
|
||||
"noise_type": "ノイズの種類",
|
||||
"noise_amount": "総計",
|
||||
"gaussian_type": "ガウス分布",
|
||||
"salt_and_pepper_type": "塩コショウ",
|
||||
"noise_color": "カラーノイズ",
|
||||
"size": "ノイズサイズ"
|
||||
},
|
||||
"adjust_image": {
|
||||
"label": "画像を調整する",
|
||||
"description": "画像の選択したチャンネルを調整します。",
|
||||
"channel": "チャンネル",
|
||||
"value_setting": "バリュー",
|
||||
"scale_values": "スケールバリュー",
|
||||
"red": "赤(RGBA)",
|
||||
"green": "緑(RGBA)",
|
||||
"blue": "青(RGBA)",
|
||||
"alpha": "アルファ(RGBA)",
|
||||
"cyan": "シアン(CMYK)",
|
||||
"magenta": "マゼンタ(CMYK)",
|
||||
"yellow": "黄色(CMYK)",
|
||||
"black": "黒(CMYK)",
|
||||
"hue": "色相(HSV)",
|
||||
"saturation": "彩度(HSV)",
|
||||
"value": "値(HSV)",
|
||||
"luminosity": "明度(LAB)",
|
||||
"a": "A(ラボ)",
|
||||
"b": "B(ラボ)",
|
||||
"y": "Y(YCbCr)",
|
||||
"cb": "Cb(YCbCr)",
|
||||
"cr": "Cr(YCbCr)"
|
||||
}
|
||||
},
|
||||
"weight": "重み"
|
||||
"weight": "重み",
|
||||
"bookmark": "クイックスイッチのブックマーク",
|
||||
"exportCanvasToPSD": "キャンバスをPSDにエクスポート",
|
||||
"savedToGalleryError": "ギャラリーへの保存中にエラーが発生しました",
|
||||
"regionCopiedToClipboard": "{{region}} をクリップボードにコピーしました",
|
||||
"copyRegionError": "{{region}} のコピー中にエラーが発生しました",
|
||||
"newGlobalReferenceImageOk": "作成されたグローバル参照画像",
|
||||
"newGlobalReferenceImageError": "グローバル参照イメージの作成中に問題が発生しました",
|
||||
"newRegionalReferenceImageOk": "地域参照画像の作成",
|
||||
"newRegionalReferenceImageError": "地域参照画像の作成中に問題が発生しました",
|
||||
"newControlLayerOk": "制御レイヤーの作成",
|
||||
"newControlLayerError": "制御層の作成中に問題が発生しました",
|
||||
"newRasterLayerOk": "ラスターレイヤーを作成しました",
|
||||
"newRasterLayerError": "ラスターレイヤーの作成中に問題が発生しました",
|
||||
"pullBboxIntoLayerOk": "Bbox をレイヤーにプル",
|
||||
"pullBboxIntoLayerError": "BBox をレイヤーにプルする際に問題が発生しました",
|
||||
"pullBboxIntoReferenceImageOk": "Bbox が ReferenceImage にプルされました",
|
||||
"pullBboxIntoReferenceImageError": "BBox を ReferenceImage にプルする際に問題が発生しました",
|
||||
"regionIsEmpty": "選択した領域は空です",
|
||||
"mergeVisible": "マージを可視化",
|
||||
"mergeVisibleOk": "マージされたレイヤー",
|
||||
"mergeVisibleError": "レイヤーの結合エラー",
|
||||
"mergingLayers": "レイヤーのマージ",
|
||||
"clearHistory": "履歴をクリア",
|
||||
"bboxOverlay": "Bboxオーバーレイを表示",
|
||||
"ruleOfThirds": "三分割法を表示",
|
||||
"newSession": "新しいセッション",
|
||||
"clearCaches": "キャッシュをクリア",
|
||||
"recalculateRects": "長方形を再計算する",
|
||||
"clipToBbox": "ストロークをBboxにクリップ",
|
||||
"outputOnlyMaskedRegions": "生成された領域のみを出力する",
|
||||
"width": "幅",
|
||||
"autoNegative": "オートネガティブ",
|
||||
"enableAutoNegative": "オートネガティブを有効にする",
|
||||
"disableAutoNegative": "オートネガティブを無効にする",
|
||||
"deletePrompt": "プロンプトを削除",
|
||||
"deleteReferenceImage": "参照画像を削除",
|
||||
"showHUD": "HUDを表示",
|
||||
"maskFill": "マスク塗りつぶし",
|
||||
"addPositivePrompt": "$t(controlLayers.prompt) を追加します",
|
||||
"addNegativePrompt": "$t(controlLayers.negativePrompt)を追加します",
|
||||
"addReferenceImage": "$t(controlLayers.referenceImage)を追加します",
|
||||
"addImageNoise": "$t(controlLayers.imageNoise)を追加します",
|
||||
"addRasterLayer": "$t(controlLayers.rasterLayer)を追加します",
|
||||
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
|
||||
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
|
||||
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
|
||||
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
|
||||
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
|
||||
"controlLayer": "コントロールレイヤー",
|
||||
"inpaintMask": "インペイントマスク",
|
||||
"referenceImageRegional": "参考画像(地域別)",
|
||||
"referenceImageGlobal": "参考画像(グローバル)",
|
||||
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
|
||||
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
|
||||
"asControlLayer": "$t(controlLayers.controlLayer) として",
|
||||
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
|
||||
"referenceImage": "参照画像",
|
||||
"sendingToCanvas": "キャンバスに生成をのせる",
|
||||
"sendingToGallery": "生成をギャラリーに送る",
|
||||
"sendToGallery": "ギャラリーに送る",
|
||||
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
|
||||
"sendToCanvas": "キャンバスに送る",
|
||||
"newLayerFromImage": "画像から新規レイヤー",
|
||||
"newCanvasFromImage": "画像から新規キャンバス",
|
||||
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
|
||||
"copyToClipboard": "クリップボードにコピー",
|
||||
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
|
||||
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
|
||||
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
|
||||
"rasterLayer_withCount_other": "ラスターレイヤー",
|
||||
"controlLayer_withCount_other": "コントロールレイヤー",
|
||||
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
|
||||
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
|
||||
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
|
||||
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
|
||||
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
|
||||
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
|
||||
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}})",
|
||||
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
|
||||
"layer_other": "レイヤー",
|
||||
"layer_withCount_other": "レイヤー ({{count}})",
|
||||
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
|
||||
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
|
||||
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
|
||||
"copyRasterLayerTo": "$t(controlLayers.rasterLayer)をコピーする",
|
||||
"copyControlLayerTo": "$t(controlLayers.controlLayer) をコピーする",
|
||||
"copyRegionalGuidanceTo": "$t(controlLayers.regionalGuidance)をコピーする",
|
||||
"newRasterLayer": "新しい $t(controlLayers.rasterLayer)",
|
||||
"newControlLayer": "新しい $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "新しい $t(controlLayers.inpaintMask)",
|
||||
"newRegionalGuidance": "新しい $t(controlLayers.regionalGuidance)",
|
||||
"pasteTo": "貼り付け先",
|
||||
"pasteToAssets": "アセット",
|
||||
"pasteToAssetsDesc": "アセットに貼り付け",
|
||||
"pasteToBbox": "Bボックス",
|
||||
"pasteToBboxDesc": "新しいレイヤー(Bbox内)",
|
||||
"pasteToCanvas": "キャンバス",
|
||||
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
|
||||
"pastedTo": "{{destination}} に貼り付けました",
|
||||
"transparency": "透明性",
|
||||
"enableTransparencyEffect": "透明効果を有効にする",
|
||||
"disableTransparencyEffect": "透明効果を無効にする",
|
||||
"hidingType": "{{type}} を非表示",
|
||||
"showingType": "{{type}}を表示",
|
||||
"showNonRasterLayers": "非ラスターレイヤーを表示 (Shift+H)",
|
||||
"hideNonRasterLayers": "非ラスターレイヤーを非表示にする (Shift+H)",
|
||||
"dynamicGrid": "ダイナミックグリッド",
|
||||
"logDebugInfo": "デバッグ情報をログに記録する",
|
||||
"locked": "ロックされています",
|
||||
"unlocked": "ロック解除",
|
||||
"deleteSelected": "選択項目を削除",
|
||||
"stagingOnCanvas": "ステージング画像",
|
||||
"replaceLayer": "レイヤーの置き換え",
|
||||
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
|
||||
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
|
||||
"showProgressOnCanvas": "キャンバスに進捗状況を表示",
|
||||
"useImage": "画像を使う",
|
||||
"negativePrompt": "ネガティブプロンプト",
|
||||
"beginEndStepPercentShort": "開始/終了 %",
|
||||
"newGallerySession": "新しいギャラリーセッション",
|
||||
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
|
||||
"newCanvasSession": "新規キャンバスセッション",
|
||||
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
|
||||
"resetCanvasLayers": "キャンバスレイヤーをリセット",
|
||||
"resetGenerationSettings": "生成設定をリセット",
|
||||
"replaceCurrent": "現在のものを置き換える",
|
||||
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
|
||||
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
|
||||
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
|
||||
"imageNoise": "画像ノイズ",
|
||||
"denoiseLimit": "ノイズ除去制限",
|
||||
"warnings": {
|
||||
"problemsFound": "問題が見つかりました",
|
||||
"unsupportedModel": "選択したベースモデルではレイヤーがサポートされていません",
|
||||
"controlAdapterNoModelSelected": "制御レイヤーモデルが選択されていません",
|
||||
"controlAdapterIncompatibleBaseModel": "互換性のない制御レイヤーベースモデル",
|
||||
"controlAdapterNoControl": "コントロールが選択/描画されていません",
|
||||
"ipAdapterNoModelSelected": "参照画像モデルが選択されていません",
|
||||
"ipAdapterIncompatibleBaseModel": "互換性のない参照画像ベースモデル",
|
||||
"ipAdapterNoImageSelected": "参照画像が選択されていません",
|
||||
"rgNoPromptsOrIPAdapters": "テキストプロンプトや参照画像はありません",
|
||||
"rgNegativePromptNotSupported": "選択されたベースモデルでは否定プロンプトはサポートされていません",
|
||||
"rgReferenceImagesNotSupported": "選択されたベースモデルでは地域の参照画像はサポートされていません",
|
||||
"rgAutoNegativeNotSupported": "選択したベースモデルでは自動否定はサポートされていません",
|
||||
"rgNoRegion": "領域が描画されていません",
|
||||
"fluxFillIncompatibleWithControlLoRA": "コントロールLoRAはFLUX Fillと互換性がありません"
|
||||
},
|
||||
"errors": {
|
||||
"unableToFindImage": "画像が見つかりません",
|
||||
"unableToLoadImage": "画像を読み込めません"
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"ipAdapterMethod": "モード",
|
||||
"full": "スタイルと構成",
|
||||
"fullDesc": "視覚スタイル (色、テクスチャ) と構成 (レイアウト、構造) を適用します。",
|
||||
"style": "スタイル(シンプル)",
|
||||
"styleDesc": "レイアウトを考慮せずに視覚スタイル(色、テクスチャ)を適用します。以前は「スタイルのみ」と呼ばれていました。",
|
||||
"composition": "構成のみ",
|
||||
"compositionDesc": "参照スタイルを無視してレイアウトと構造を複製します。",
|
||||
"styleStrong": "スタイル(ストロング)",
|
||||
"styleStrongDesc": "構成への影響をわずかに抑えて、強力なビジュアル スタイルを適用します。",
|
||||
"stylePrecise": "スタイル(正確)",
|
||||
"stylePreciseDesc": "被写体の影響を排除し、正確な視覚スタイルを適用します。"
|
||||
},
|
||||
"fluxReduxImageInfluence": {
|
||||
"imageInfluence": "イメージの影響力",
|
||||
"lowest": "最低",
|
||||
"low": "低",
|
||||
"medium": "中",
|
||||
"high": "高",
|
||||
"highest": "最高"
|
||||
},
|
||||
"fill": {
|
||||
"fillColor": "塗りつぶし色",
|
||||
"fillStyle": "塗りつぶしスタイル",
|
||||
"solid": "固体",
|
||||
"grid": "グリッド",
|
||||
"crosshatch": "クロスハッチ",
|
||||
"vertical": "垂直",
|
||||
"horizontal": "水平",
|
||||
"diagonal": "対角線"
|
||||
},
|
||||
"selectObject": {
|
||||
"selectObject": "オブジェクトを選択",
|
||||
"pointType": "ポイントタイプ",
|
||||
"invertSelection": "選択範囲を反転",
|
||||
"include": "含む",
|
||||
"exclude": "除外",
|
||||
"neutral": "ニュートラル",
|
||||
"apply": "適用",
|
||||
"reset": "リセット",
|
||||
"saveAs": "名前を付けて保存",
|
||||
"cancel": "キャンセル",
|
||||
"process": "プロセス",
|
||||
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
|
||||
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
|
||||
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
|
||||
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
|
||||
"dragToMove": "ポイントをドラッグして移動します",
|
||||
"clickToRemove": "ポイントをクリックして削除します"
|
||||
},
|
||||
"HUD": {
|
||||
"bbox": "Bボックス",
|
||||
"scaledBbox": "スケールされたBボックス",
|
||||
"entityStatus": {
|
||||
"isFiltering": "{{title}} はフィルタリング中です",
|
||||
"isTransforming": "{{title}}は変化しています",
|
||||
"isLocked": "{{title}}はロックされています",
|
||||
"isHidden": "{{title}}は非表示になっています",
|
||||
"isDisabled": "{{title}}は無効です",
|
||||
"isEmpty": "{{title}} は空です"
|
||||
}
|
||||
},
|
||||
"stagingArea": {
|
||||
"accept": "受け入れる",
|
||||
"discardAll": "すべて破棄",
|
||||
"discard": "破棄する",
|
||||
"previous": "前へ",
|
||||
"next": "次へ",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"showResultsOn": "結果を表示",
|
||||
"showResultsOff": "結果を隠す"
|
||||
}
|
||||
},
|
||||
"stylePresets": {
|
||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||
@@ -1810,13 +2304,56 @@
|
||||
"nameColumn": "'name'",
|
||||
"type": "タイプ",
|
||||
"private": "プライベート",
|
||||
"name": "名称"
|
||||
"name": "名称",
|
||||
"active": "アクティブ",
|
||||
"copyTemplate": "テンプレートをコピー",
|
||||
"deleteImage": "画像を削除",
|
||||
"deleteTemplate": "テンプレートを削除",
|
||||
"deleteTemplate2": "このテンプレートを削除してもよろしいですか? 元に戻すことはできません。",
|
||||
"exportPromptTemplates": "プロンプトテンプレートをエクスポートする(CSV)",
|
||||
"editTemplate": "テンプレートを編集",
|
||||
"exportDownloaded": "エクスポートをダウンロードしました",
|
||||
"exportFailed": "生成とCSVのダウンロードができません",
|
||||
"importTemplates": "プロンプトテンプレートのインポート(CSV/JSON)",
|
||||
"acceptedColumnsKeys": "受け入れられる列/キー:",
|
||||
"positivePromptColumn": "'プロンプト'または'ポジティブプロンプト'",
|
||||
"insertPlaceholder": "プレースホルダーを挿入",
|
||||
"negativePrompt": "ネガティブプロンプト",
|
||||
"noTemplates": "テンプレートがありません",
|
||||
"noMatchingTemplates": "マッチするテンプレートがありません",
|
||||
"promptTemplatesDesc1": "プロンプトテンプレートは、プロンプトボックスに書き込むプロンプトにテキストを追加します。",
|
||||
"promptTemplatesDesc2": "テンプレート内でプロンプトを含める場所を指定するには <Pre>{{placeholder}}</Pre> のプレースホルダーの文字列を使用します。",
|
||||
"promptTemplatesDesc3": "プレースホルダーを省略すると、テンプレートはプロンプトの末尾に追加されます。",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"shared": "共有",
|
||||
"sharedTemplates": "テンプレートを共有",
|
||||
"templateDeleted": "プロンプトテンプレートを削除しました",
|
||||
"unableToDeleteTemplate": "プロンプトテンプレートを削除できません",
|
||||
"updatePromptTemplate": "プロンプトテンプレートをアップデート",
|
||||
"useForTemplate": "プロンプトテンプレートに使用する",
|
||||
"viewList": "テンプレートリストを表示",
|
||||
"viewModeTooltip": "現在選択されているテンプレートでは、プロンプトはこのようになります。プロンプトを編集するには、テキストボックス内の任意の場所をクリックしてください。",
|
||||
"togglePromptPreviews": "プロンプトプレビューを切り替える"
|
||||
},
|
||||
"upscaling": {
|
||||
"upscaleModel": "アップスケールモデル",
|
||||
"postProcessingModel": "ポストプロセスモデル",
|
||||
"upscale": "アップスケール",
|
||||
"scale": "スケール"
|
||||
"scale": "スケール",
|
||||
"creativity": "創造性",
|
||||
"exceedsMaxSize": "アップスケール設定が最大サイズ制限を超えています",
|
||||
"exceedsMaxSizeDetails": "アップスケールの上限は{{maxUpscaleDimension}} x {{maxUpscaleDimension}}ピクセルです。画像を小さくするか、スケールの選択範囲を小さくしてください。",
|
||||
"structure": "構造",
|
||||
"postProcessingMissingModelWarning": "後処理 (img2img) モデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"missingModelsWarning": "必要なモデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"mainModelDesc": "メインモデル(SD1.5またはSDXLアーキテクチャ)",
|
||||
"tileControlNetModelDesc": "選択したメインモデルアーキテクチャのタイルコントロールネットモデル",
|
||||
"upscaleModelDesc": "アップスケール(img2img)モデル",
|
||||
"missingUpscaleInitialImage": "アップスケール用の初期画像がありません",
|
||||
"missingUpscaleModel": "アップスケールモデルがありません",
|
||||
"missingTileControlNetModel": "有効なタイル コントロールネットモデルがインストールされていません",
|
||||
"incompatibleBaseModel": "アップスケーリングにサポートされていないメインモデルアーキテクチャです",
|
||||
"incompatibleBaseModelDesc": "アップスケーリングはSD1.5およびSDXLアーキテクチャモデルでのみサポートされています。アップスケーリングを有効にするには、メインモデルを変更してください。"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
@@ -1891,7 +2428,34 @@
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
"publishedWorkflowInputs": "インプット",
|
||||
"workflowLocked": "ワークフローがロックされました",
|
||||
"workflowLockedPublished": "公開済みのワークフローは編集用にロックされています。\nワークフローを非公開にして編集したり、コピーを作成したりできます。",
|
||||
"workflowLockedDuringPublishing": "公開の構成中にワークフローがロックされます。",
|
||||
"selectOutputNode": "出力ノードを選択",
|
||||
"changeOutputNode": "出力ノードの変更",
|
||||
"unpublishableInputs": "これらの公開できない入力は省略されます",
|
||||
"noPublishableInputs": "公開可能な入力はありません",
|
||||
"noOutputNodeSelected": "出力ノードが選択されていません",
|
||||
"cannotPublish": "ワークフローを公開できません",
|
||||
"publishWarnings": "警告",
|
||||
"errorWorkflowHasUnsavedChanges": "ワークフローに保存されていない変更があります",
|
||||
"errorWorkflowHasUnpublishableNodes": "ワークフローにはバッチ、ジェネレータ、またはメタデータ抽出ノードがあります",
|
||||
"errorWorkflowHasInvalidGraph": "ワークフロー グラフが無効です (詳細については [呼び出し] ボタンにマウスを移動してください)",
|
||||
"errorWorkflowHasNoOutputNode": "出力ノードが選択されていません",
|
||||
"warningWorkflowHasNoPublishableInputFields": "公開可能な入力フィールドが選択されていません - 公開されたワークフローはデフォルト値のみで実行されます",
|
||||
"warningWorkflowHasUnpublishableInputFields": "ワークフローには公開できない入力がいくつかあります。これらは公開されたワークフローから省略されます",
|
||||
"publishFailed": "公開失敗",
|
||||
"publishFailedDesc": "ワークフローの公開中に問題が発生しました。もう一度お試しください。",
|
||||
"publishSuccess": "ワークフローを公開しています",
|
||||
"publishSuccessDesc": "<LinkComponent>プロジェクト ダッシュボード</LinkComponent> をチェックして進捗状況を確認してください。",
|
||||
"publishInProgress": "公開中",
|
||||
"publishedWorkflowIsLocked": "公開されたワークフローはロックされています",
|
||||
"publishingValidationRun": "公開検証実行",
|
||||
"publishingValidationRunInProgress": "公開検証の実行が進行中です。",
|
||||
"publishedWorkflowsLocked": "公開済みのワークフローはロックされており、編集または実行できません。このワークフローを編集または実行するには、ワークフローを非公開にするか、コピーを保存してください。",
|
||||
"selectingOutputNode": "出力ノードの選択",
|
||||
"selectingOutputNodeDesc": "ノードをクリックして、ワークフローの出力ノードとして選択します。"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
@@ -1954,15 +2518,23 @@
|
||||
"models": "モデル",
|
||||
"canvas": "キャンバス",
|
||||
"metadata": "メタデータ",
|
||||
"queue": "キュー"
|
||||
"queue": "キュー",
|
||||
"logNamespaces": "ログのネームスペース",
|
||||
"dnd": "ドラッグ&ドロップ",
|
||||
"config": "構成",
|
||||
"generation": "生成",
|
||||
"events": "イベント"
|
||||
},
|
||||
"logLevel": {
|
||||
"debug": "Debug",
|
||||
"info": "Info",
|
||||
"error": "Error",
|
||||
"fatal": "Fatal",
|
||||
"warn": "Warn"
|
||||
}
|
||||
"warn": "Warn",
|
||||
"logLevel": "ログレベル",
|
||||
"trace": "追跡"
|
||||
},
|
||||
"enableLogging": "ログを有効にする"
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"promptsPreview": "プロンプトプレビュー",
|
||||
@@ -1978,5 +2550,34 @@
|
||||
"dynamicPrompts": "ダイナミックプロンプト",
|
||||
"loading": "ダイナミックプロンプトを生成...",
|
||||
"maxPrompts": "最大プロンプト"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "チームメートを招待",
|
||||
"professional": "プロフェッショナル",
|
||||
"professionalUpsell": "InvokeのProfessional Editionでご利用いただけます。詳細については、こちらをクリックするか、invoke.com/pricingをご覧ください。",
|
||||
"shareAccess": "共有アクセス"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"toGetStartedLocal": "始めるには、Invoke の実行に必要なモデルをダウンロードまたはインポートしてください。次に、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
||||
"toGetStarted": "開始するには、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
||||
"toGetStartedWorkflow": "開始するには、左側のフィールドに入力し、<StrongComponent>Invoke</StrongComponent> をクリックして画像を生成します。他のワークフローも試してみたい場合は、ワークフロータイトルの横にある<StrongComponent>フォルダアイコン</StrongComponent> をクリックすると、試せる他のテンプレートのリストが表示されます。",
|
||||
"gettingStartedSeries": "さらに詳しいガイダンスが必要ですか? Invoke Studio の可能性を最大限に引き出すためのヒントについては、<LinkComponent>入門シリーズ</LinkComponent>をご覧ください。",
|
||||
"lowVRAMMode": "最高のパフォーマンスを得るには、<LinkComponent>低 VRAM ガイド</LinkComponent>に従ってください。",
|
||||
"noModelsInstalled": "モデルがインストールされていないようです。<DownloadStarterModelsButton>スターターモデルバンドルをダウンロード</DownloadStarterModelsButton>するか、<ImportModelsButton>モデルをインポート</ImportModelsButton>してください。"
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "Invokeの新機能",
|
||||
"items": [
|
||||
"インペインティング: マスクごとのノイズ レベルとノイズ除去の制限。",
|
||||
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
|
||||
],
|
||||
"readReleaseNotes": "リリースノートを読む",
|
||||
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
|
||||
"watchUiUpdatesOverview": "UIアップデートの概要を見る"
|
||||
},
|
||||
"supportVideos": {
|
||||
"supportVideos": "サポートビデオ",
|
||||
"gettingStarted": "はじめる",
|
||||
"watch": "ウォッチ"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1931,7 +1931,6 @@
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Генерация",
|
||||
"canvas": "Холст",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Модели",
|
||||
|
||||
@@ -74,7 +74,7 @@
|
||||
"bulkDownloadFailed": "Tải Xuống Thất Bại",
|
||||
"bulkDownloadRequestFailed": "Có Vấn Đề Khi Đang Chuẩn Bị Tải Xuống",
|
||||
"download": "Tải Xuống",
|
||||
"dropOrUpload": "$t(gallery.drop) Hoặc Tải Lên",
|
||||
"dropOrUpload": "Kéo Thả Hoặc Tải Lên",
|
||||
"currentlyInUse": "Hình ảnh này hiện đang sử dụng các tính năng sau:",
|
||||
"deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
|
||||
"exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
|
||||
@@ -111,7 +111,7 @@
|
||||
"noImageSelected": "Không Có Ảnh Được Chọn",
|
||||
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
|
||||
"assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
|
||||
"imagesTab": "hình bạn vừa được tạo và lưu trong Invoke.",
|
||||
"imagesTab": "Ảnh bạn vừa được tạo và lưu trong Invoke.",
|
||||
"loading": "Đang Tải",
|
||||
"oldestFirst": "Cũ Nhất Trước",
|
||||
"exitCompare": "Ngừng So Sánh",
|
||||
@@ -122,7 +122,8 @@
|
||||
"boardsSettings": "Thiết Lập Bảng",
|
||||
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
|
||||
"assets": "Tài Nguyên",
|
||||
"images": "Hình Ảnh"
|
||||
"images": "Hình Ảnh",
|
||||
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
|
||||
},
|
||||
"common": {
|
||||
"ipAdapter": "IP Adapter",
|
||||
@@ -254,9 +255,18 @@
|
||||
"options_withCount_other": "{{count}} thiết lập"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Thêm Prompt Trigger",
|
||||
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
|
||||
"compatibleEmbeddings": "Embedding Tương Thích",
|
||||
"noMatchingTriggers": "Không có trigger phù hợp"
|
||||
"noMatchingTriggers": "Không có trigger phù hợp",
|
||||
"generateFromImage": "Tạo sinh lệnh từ ảnh",
|
||||
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
|
||||
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
|
||||
"expandingPrompt": "Đang mở rộng lệnh...",
|
||||
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
|
||||
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
|
||||
"replace": "Thay Thế",
|
||||
"insert": "Chèn",
|
||||
"discard": "Huỷ Bỏ"
|
||||
},
|
||||
"queue": {
|
||||
"resume": "Tiếp Tục",
|
||||
@@ -289,7 +299,7 @@
|
||||
"pruneTooltip": "Cắt bớt {{item_count}} mục đã hoàn tất",
|
||||
"pruneSucceeded": "Đã cắt bớt {{item_count}} mục đã hoàn tất khỏi hàng",
|
||||
"clearTooltip": "Huỷ Và Dọn Dẹp Tất Cả Mục",
|
||||
"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ.",
|
||||
"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ và Vùng Dựng Canva sẽ được khởi động lại.",
|
||||
"session": "Phiên",
|
||||
"item": "Mục",
|
||||
"resumeFailed": "Có Vấn Đề Khi Tiếp Tục Bộ Xử Lý",
|
||||
@@ -333,13 +343,14 @@
|
||||
"retrySucceeded": "Mục Đã Thử Lại",
|
||||
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
|
||||
"retryItem": "Thử Lại Mục",
|
||||
"credits": "Nguồn"
|
||||
"credits": "Nguồn",
|
||||
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại"
|
||||
},
|
||||
"hotkeys": {
|
||||
"canvas": {
|
||||
"fitLayersToCanvas": {
|
||||
"title": "Xếp Vừa Layers Vào Canvas",
|
||||
"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer."
|
||||
"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer nhìn thấy dược."
|
||||
},
|
||||
"setZoomTo800Percent": {
|
||||
"desc": "Phóng to canvas lên 800%.",
|
||||
@@ -453,6 +464,34 @@
|
||||
"applyFilter": {
|
||||
"title": "Áp Dụng Bộ Lộc",
|
||||
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
|
||||
},
|
||||
"settings": {
|
||||
"behavior": "Hành Vi",
|
||||
"display": "Hiển Thị",
|
||||
"grid": "Lưới",
|
||||
"debug": "Gỡ Lỗi"
|
||||
},
|
||||
"toggleNonRasterLayers": {
|
||||
"title": "Bật/Tắt Layer Không Thuộc Dạng Raster",
|
||||
"desc": "Hiện hoặc ẩn tất cả layer không thuộc dạng raster (Layer Điều Khiển Được, Lớp Phủ Inpaint, Chỉ Dẫn Khu Vực)."
|
||||
},
|
||||
"invertMask": {
|
||||
"title": "Đảo Ngược Lớp Phủ",
|
||||
"desc": "Đảo ngược lớp phủ inpaint được chọn, tạo một lớp phủ mới với độ trong suốt đối nghịch."
|
||||
},
|
||||
"fitBboxToMasks": {
|
||||
"title": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
|
||||
"desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào lớp phủ inpaint nhìn thấy được"
|
||||
},
|
||||
"applySegmentAnything": {
|
||||
"title": "Áp Dụng Segment Anything",
|
||||
"desc": "Áp dụng lớp phủ Segment Anything hiện tại.",
|
||||
"key": "enter"
|
||||
},
|
||||
"cancelSegmentAnything": {
|
||||
"title": "Huỷ Segment Anything",
|
||||
"desc": "Huỷ hoạt động Segment Anything hiện tại.",
|
||||
"key": "esc"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -582,6 +621,10 @@
|
||||
"clearSelection": {
|
||||
"desc": "Xoá phần lựa chọn hiện tại nếu có.",
|
||||
"title": "Xoá Phần Lựa Chọn"
|
||||
},
|
||||
"starImage": {
|
||||
"title": "Dấu/Huỷ Sao Hình Ảnh",
|
||||
"desc": "Đánh dấu sao hoặc huỷ đánh dấu sao ảnh được chọn."
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
@@ -641,6 +684,11 @@
|
||||
"selectModelsTab": {
|
||||
"desc": "Chọn tab Model (Mô Hình).",
|
||||
"title": "Chọn Tab Model"
|
||||
},
|
||||
"selectGenerateTab": {
|
||||
"title": "Chọn Tab Tạo Sinh",
|
||||
"desc": "Chọn tab Tạo Sinh.",
|
||||
"key": "1"
|
||||
}
|
||||
},
|
||||
"searchHotkeys": "Tìm Phím tắt",
|
||||
@@ -695,7 +743,7 @@
|
||||
"cancel": "Huỷ",
|
||||
"huggingFace": "HuggingFace (HF)",
|
||||
"huggingFacePlaceholder": "chủ-sỡ-hữu/tên-model",
|
||||
"includesNModels": "Thêm vào {{n}} model và dependency của nó",
|
||||
"includesNModels": "Thêm vào {{n}} model và dependency của nó.",
|
||||
"localOnly": "chỉ ở trên máy chủ",
|
||||
"manual": "Thủ Công",
|
||||
"convertToDiffusersHelpText4": "Đây là quá trình diễn ra chỉ một lần. Nó có thể tốn tầm 30-60 giây tuỳ theo thông số kỹ thuật của máy tính.",
|
||||
@@ -742,7 +790,7 @@
|
||||
"simpleModelPlaceholder": "Url hoặc đường đẫn đến tệp hoặc thư mục chứa diffusers trong máy chủ",
|
||||
"selectModel": "Chọn Model",
|
||||
"spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
|
||||
"starterBundles": "Quà Tân Thủ",
|
||||
"starterBundles": "Gói Khởi Đầu",
|
||||
"vae": "VAE",
|
||||
"urlOrLocalPath": "URL / Đường Dẫn",
|
||||
"triggerPhrases": "Từ Ngữ Kích Hoạt",
|
||||
@@ -794,7 +842,30 @@
|
||||
"manageModels": "Quản Lý Model",
|
||||
"hfTokenReset": "Làm Mới HF Token",
|
||||
"relatedModels": "Model Liên Quan",
|
||||
"showOnlyRelatedModels": "Liên Quan"
|
||||
"showOnlyRelatedModels": "Liên Quan",
|
||||
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
|
||||
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
|
||||
"nToInstall": "Còn {{count}} để tải",
|
||||
"nAlreadyInstalled": "Có {{count}} đã tải",
|
||||
"bundleAlreadyInstalled": "Gói đã được cài sẵn",
|
||||
"bundleAlreadyInstalledDesc": "Tất cả model trong gói {{bundleName}} đã được cài sẵn.",
|
||||
"launchpadTab": "Launchpad",
|
||||
"launchpad": {
|
||||
"welcome": "Chào mừng đến Trình Quản Lý Model",
|
||||
"description": "Invoke yêu cầu tải model nhằm tối ưu hoá các tính năng trên nền tảng. Chọn tải các phương án thủ công hoặc khám phá các model khởi đầu thích hợp.",
|
||||
"manualInstall": "Tải Thủ Công",
|
||||
"urlDescription": "Tải model bằng URL hoặc đường dẫn trên máy. Phù hợp để cụ thể model muốn thêm vào.",
|
||||
"huggingFaceDescription": "Duyệt và cài đặt model từ các repository trên HuggingFace.",
|
||||
"scanFolderDescription": "Quét một thư mục trên máy để tự động tra và tải model.",
|
||||
"recommendedModels": "Model Khuyến Nghị",
|
||||
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
|
||||
"quickStart": "Gói Khởi Đầu Nhanh",
|
||||
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
|
||||
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
|
||||
"stableDiffusion15": "Stable Diffusion 1.5",
|
||||
"sdxl": "SDXL",
|
||||
"fluxDev": "FLUX.1 dev"
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Hướng Dẫn",
|
||||
@@ -802,7 +873,7 @@
|
||||
"imageDetails": "Chi Tiết Ảnh",
|
||||
"createdBy": "Được Tạo Bởi",
|
||||
"parsingFailed": "Lỗi Cú Pháp",
|
||||
"canvasV2Metadata": "Canvas",
|
||||
"canvasV2Metadata": "Layer Canvas",
|
||||
"parameterSet": "Dữ liệu tham số {{parameter}}",
|
||||
"positivePrompt": "Lệnh Tích Cực",
|
||||
"recallParameter": "Gợi Nhớ {{label}}",
|
||||
@@ -1047,7 +1118,23 @@
|
||||
"unknownField_withName": "Vùng Dữ Liệu Không Rõ \"{{name}}\"",
|
||||
"unexpectedField_withName": "Sai Vùng Dữ Liệu \"{{name}}\"",
|
||||
"unknownFieldEditWorkflowToFix_withName": "Workflow chứa vùng dữ liệu không rõ \"{{name}}\".\nHãy biên tập workflow để sửa lỗi.",
|
||||
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\""
|
||||
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\"",
|
||||
"layout": {
|
||||
"autoLayout": "Bố Cục Tự Động",
|
||||
"layeringStrategy": "Chiến Lược Phân Layer",
|
||||
"networkSimplex": "Network Simplex",
|
||||
"longestPath": "Đường Đi Dài Nhất",
|
||||
"nodeSpacing": "Khoảng Cách Node",
|
||||
"layerSpacing": "Khoảng Cách Layer",
|
||||
"layoutDirection": "Hướng Bố Cục",
|
||||
"layoutDirectionRight": "Phải",
|
||||
"layoutDirectionDown": "Xuống",
|
||||
"alignment": "Căn Chỉnh Node",
|
||||
"alignmentUL": "Trên Cùng Bên Trái",
|
||||
"alignmentDL": "Dưới Cùng Bên Trái",
|
||||
"alignmentUR": "Trên Cùng Bên Phải",
|
||||
"alignmentDR": "Dưới Cùng Bên Phải"
|
||||
}
|
||||
},
|
||||
"popovers": {
|
||||
"paramCFGRescaleMultiplier": {
|
||||
@@ -1474,6 +1561,20 @@
|
||||
"Lát khối liền mạch bức ảnh theo trục ngang."
|
||||
],
|
||||
"heading": "Lát Khối Liền Mạch Trục X"
|
||||
},
|
||||
"tileSize": {
|
||||
"heading": "Kích Thước Khối",
|
||||
"paragraphs": [
|
||||
"Điều chỉnh kích thước của khối trong quá trình upscale. Khối càng lớn, bộ nhớ được sử dụng càng nhiều, nhưng có thể tạo sinh ảnh tốt hơn.",
|
||||
"Model SD1.5 mặt định là 768, trong khi SDXL mặc định là 1024. Giảm kích thước khối nếu các gặp vấn đề bộ nhớ."
|
||||
]
|
||||
},
|
||||
"tileOverlap": {
|
||||
"heading": "Chồng Chéo Khối",
|
||||
"paragraphs": [
|
||||
"Điều chỉnh sự chồng chéo giữa các khối liền kề trong quá trình upscale. Giá trị chồng chép lớn giúp giảm sự rõ nét của các chỗ nối nhau, nhưng ngốn nhiều bộ nhớ hơn.",
|
||||
"Giá trị mặc định (128) hoạt động tốt với đa số trường hợp, nhưng bạn có thể điều chỉnh cho phù hợp với nhu cầu cụ thể và hạn chế về bộ nhớ."
|
||||
]
|
||||
}
|
||||
},
|
||||
"models": {
|
||||
@@ -1487,7 +1588,8 @@
|
||||
"defaultVAE": "VAE Mặc Định",
|
||||
"noMatchingModels": "Không có Model phù hợp",
|
||||
"noModelsAvailable": "Không có model",
|
||||
"selectModel": "Chọn Model"
|
||||
"selectModel": "Chọn Model",
|
||||
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
|
||||
},
|
||||
"parameters": {
|
||||
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
|
||||
@@ -1538,7 +1640,10 @@
|
||||
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
|
||||
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
|
||||
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với FLUX Kontext thông qua BFL API",
|
||||
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
|
||||
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
|
||||
},
|
||||
"cfgScale": "Thang CFG",
|
||||
"useSeed": "Dùng Hạt Giống",
|
||||
@@ -1869,7 +1974,8 @@
|
||||
"canvasGroup": "Canvas",
|
||||
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
|
||||
"copyToClipboard": "Sao Chép Vào Clipboard",
|
||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
|
||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard",
|
||||
"newResizedControlLayer": "Layer Điều Khiển Được Đã Chỉnh Kích Thước Mới"
|
||||
},
|
||||
"stagingArea": {
|
||||
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
|
||||
@@ -2050,7 +2156,11 @@
|
||||
},
|
||||
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
|
||||
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
|
||||
"isolatedPreview": "Xem Trước Phần Cô Lập"
|
||||
"isolatedPreview": "Xem Trước Phần Cô Lập",
|
||||
"saveAllImagesToGallery": {
|
||||
"label": "Chuyển Sản Phẩm Tạo Sinh Mới Vào Thư Viện Ảnh",
|
||||
"alert": "Đang chuyển sản phẩm tạo sinh mới vào Thư Viện Ảnh, bỏ qua Canvas"
|
||||
}
|
||||
},
|
||||
"tool": {
|
||||
"eraser": "Tẩy",
|
||||
@@ -2062,8 +2172,8 @@
|
||||
"colorPicker": "Chọn Màu"
|
||||
},
|
||||
"mergingLayers": "Đang gộp layer",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ thư viện ảnh vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton> hoặc kéo ảnh từ thư viện ảnh vào Ảnh Mẫu để bắt đầu.",
|
||||
"useImage": "Dùng Hình Ảnh",
|
||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
|
||||
@@ -2115,7 +2225,22 @@
|
||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
|
||||
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
|
||||
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
|
||||
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
|
||||
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
|
||||
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
|
||||
"autoSwitch": {
|
||||
"off": "Tắt",
|
||||
"switchOnStart": "Khi Bắt Đầu",
|
||||
"switchOnFinish": "Khi Kết Thúc"
|
||||
},
|
||||
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
|
||||
"invertMask": "Đảo Ngược Lớp Phủ",
|
||||
"maxRefImages": "Ảnh Mẫu Tối Đa",
|
||||
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu"
|
||||
},
|
||||
"stylePresets": {
|
||||
"negativePrompt": "Lệnh Tiêu Cực",
|
||||
@@ -2161,7 +2286,8 @@
|
||||
"deleteImage": "Xoá Hình Ảnh",
|
||||
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
|
||||
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
|
||||
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh"
|
||||
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh",
|
||||
"togglePromptPreviews": "Bật/Tắt Xem Trước Lệnh"
|
||||
},
|
||||
"system": {
|
||||
"enableLogging": "Bật Chế Độ Ghi Log",
|
||||
@@ -2257,20 +2383,131 @@
|
||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
|
||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
|
||||
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
|
||||
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
|
||||
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
|
||||
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
||||
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
|
||||
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
|
||||
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
|
||||
"canvasTooLarge": "Canvas Quá Lớn",
|
||||
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng chủa canvas và thử lại.",
|
||||
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
|
||||
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
|
||||
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
|
||||
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
|
||||
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
|
||||
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
|
||||
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
|
||||
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
|
||||
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại.",
|
||||
"maskInverted": "Đã Đảo Ngược Lớp Phủ",
|
||||
"maskInvertFailed": "Thất Bại Khi Đảo Ngược Lớp Phủ",
|
||||
"noVisibleMasks": "Không Có Lớp Phủ Đang Hiển Thị",
|
||||
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược",
|
||||
"noInpaintMaskSelected": "Không Có Lớp Phủ Inpant Được Chọn",
|
||||
"noInpaintMaskSelectedDesc": "Chọn một lớp phủ inpaint để đảo ngược",
|
||||
"invalidBbox": "Hộp Giới Hạn Không Hợp Lệ",
|
||||
"invalidBboxDesc": "Hợp giới hạn có kích thước không hợp lệ"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"gallery": "Thư Viện Ảnh",
|
||||
"models": "Models",
|
||||
"generation": "Generation (Máy Tạo Sinh)",
|
||||
"upscaling": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)",
|
||||
"canvas": "Canvas (Vùng Ảnh)",
|
||||
"upscalingTab": "$t(common.tab) $t(ui.tabs.upscaling)",
|
||||
"modelsTab": "$t(common.tab) $t(ui.tabs.models)",
|
||||
"queue": "Queue (Hàng Đợi)",
|
||||
"workflows": "Workflow (Luồng Làm Việc)",
|
||||
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
|
||||
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)",
|
||||
"generate": "Tạo Sinh"
|
||||
},
|
||||
"launchpad": {
|
||||
"workflowsTitle": "Đi sâu hơn với Workflow.",
|
||||
"upscalingTitle": "Upscale và thêm chi tiết.",
|
||||
"canvasTitle": "Biên tập và làm đẹp trên Canvas.",
|
||||
"generateTitle": "Tạo sinh ảnh từ lệnh chữ.",
|
||||
"modelGuideText": "Muốn biết lệnh nào tốt nhất cho từng model chứ?",
|
||||
"modelGuideLink": "Xem thêm Hướng Dẫn Model.",
|
||||
"workflows": {
|
||||
"description": "Workflow là các template tái sử dụng được sẽ tự động hoá các tác vụ tạo sinh ảnh, cho phép bạn nhanh chóng thực hiện cách thao tác phức tạp và nhận được kết quả nhất quán.",
|
||||
"learnMoreLink": "Học thêm cách tạo ra workflow",
|
||||
"browseTemplates": {
|
||||
"title": "Duyệt Template Workflow",
|
||||
"description": "Chọn từ các workflow có sẵn cho những tác vụ cơ bản"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "Tạo workflow mới",
|
||||
"description": "Tạo workflow mới từ ban đầu"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "Tải workflow từ tệp",
|
||||
"description": "Tải lên workflow để bắt đầu với những thiết lập sẵn có"
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
"uploadImage": {
|
||||
"title": "Tải Ảnh Để Upscale",
|
||||
"description": "Nhấp hoặc kéo ảnh để upscale (JPG, PNG, WebP lên đến 100MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "Thay Thế Ảnh Hiện Tại",
|
||||
"description": "Nhấp hoặc kéo ảnh mới để thay thế cái hiện tại"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "Ảnh Đã Sẵn Sàng",
|
||||
"description": "Bấm 'Kích Hoạt' để chuẩn bị upscale"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "Chuẩn bị upscale!",
|
||||
"description": "Điều chỉnh thiết lập bên dưới, sau đó bấm vào nút 'Khởi Động' để chuẩn bị upscale ảnh."
|
||||
},
|
||||
"upscaleModel": "Model Upscale",
|
||||
"model": "Model",
|
||||
"helpText": {
|
||||
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
|
||||
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
|
||||
},
|
||||
"scale": "Kích Thước",
|
||||
"creativityAndStructure": {
|
||||
"title": "Độ Sáng Tạo & Cấu Trúc Mặc Định",
|
||||
"conservative": "Bảo toàn",
|
||||
"balanced": "Cân bằng",
|
||||
"creative": "Sáng tạo",
|
||||
"artistic": "Thẩm mỹ"
|
||||
}
|
||||
},
|
||||
"createNewWorkflowFromScratch": "Tạo workflow mới từ đầu",
|
||||
"browseAndLoadWorkflows": "Duyệt và tải workflow có sẵn",
|
||||
"addStyleRef": {
|
||||
"title": "Thêm Phong Cách Mẫu",
|
||||
"description": "Thêm ảnh để chuyển đổi diện mạo của nó."
|
||||
},
|
||||
"editImage": {
|
||||
"title": "Biên Tập Ảnh",
|
||||
"description": "Thêm ảnh để chỉnh sửa."
|
||||
},
|
||||
"generateFromText": {
|
||||
"title": "Tạo Sinh Từ Chữ",
|
||||
"description": "Nhập lệnh vào và Kích Hoạt."
|
||||
},
|
||||
"useALayoutImage": {
|
||||
"title": "Dùng Bố Cục Ảnh",
|
||||
"description": "Thêm ảnh để điều khiển bố cục."
|
||||
},
|
||||
"generate": {
|
||||
"canvasCalloutTitle": "Đang tìm cách để điều khiển, chỉnh sửa, và làm lại ảnh?",
|
||||
"canvasCalloutLink": "Vào Canvas cho nhiều tính năng hơn."
|
||||
}
|
||||
},
|
||||
"panels": {
|
||||
"launchpad": "Launchpad",
|
||||
"workflowEditor": "Trình Biên Tập Workflow",
|
||||
"imageViewer": "Trình Xem Ảnh",
|
||||
"canvas": "Canvas"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
@@ -2423,7 +2660,10 @@
|
||||
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
|
||||
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
|
||||
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
|
||||
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
|
||||
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale.",
|
||||
"tileControl": "Điều Chỉnh Khối",
|
||||
"tileSize": "Kích Thước Khối",
|
||||
"tileOverlap": "Chồng Chéo Khối"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
|
||||
@@ -2439,8 +2679,8 @@
|
||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||
"Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
|
||||
"Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
@@ -2452,64 +2692,18 @@
|
||||
"supportVideos": {
|
||||
"supportVideos": "Video Hỗ Trợ",
|
||||
"gettingStarted": "Bắt Đầu Làm Quen",
|
||||
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
|
||||
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
|
||||
"watch": "Xem",
|
||||
"studioSessionsDesc": "Tham gia <DiscordLink /> để xem các buổi phát trực tiếp và đặt câu hỏi. Các phiên được đăng lên trên playlist các tuần tiếp theo.",
|
||||
"videos": {
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
|
||||
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
|
||||
"gettingStarted": {
|
||||
"title": "Bắt Đầu Với Invoke",
|
||||
"description": "Hoàn thành các video bao hàm mọi thứ bạn cần biết để bắt đầu với Invoke, từ tạo bức ảnh đầu tiên đến các kỹ thuật phức tạp khác."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
|
||||
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
|
||||
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
|
||||
},
|
||||
"upscaling": {
|
||||
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
|
||||
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
|
||||
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
|
||||
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
|
||||
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
|
||||
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"title": "Làm Sao Để Tôi Outpaint?",
|
||||
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
|
||||
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
|
||||
},
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
|
||||
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
|
||||
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
|
||||
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
|
||||
"studioSessions": {
|
||||
"title": "Phiên Studio",
|
||||
"description": "Đào sâu vào các phiên họp để khám phá những tính năng nâng cao của Invoke, sáng tạo workflow, và thảo luận cộng đồng."
|
||||
}
|
||||
},
|
||||
"controlCanvas": "Điều Khiển Canvas",
|
||||
"watch": "Xem"
|
||||
}
|
||||
},
|
||||
"modelCache": {
|
||||
"clearSucceeded": "Cache Model Đã Được Dọn",
|
||||
|
||||
@@ -1772,7 +1772,6 @@
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "生成",
|
||||
"queue": "队列",
|
||||
"canvas": "画布",
|
||||
"upscaling": "放大中",
|
||||
|
||||
@@ -3,9 +3,9 @@ import { useStore } from '@nanostores/react';
|
||||
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
|
||||
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
|
||||
import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { clearStorage } from 'app/store/enhancers/reduxRemember/driver';
|
||||
import type { PartialAppConfig } from 'app/types/invokeai';
|
||||
import Loading from 'common/components/Loading/Loading';
|
||||
import { useClearStorage } from 'common/hooks/useClearStorage';
|
||||
import { AppContent } from 'features/ui/components/AppContent';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
@@ -21,25 +21,24 @@ interface Props {
|
||||
|
||||
const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
const didStudioInit = useStore($didStudioInit);
|
||||
const clearStorage = useClearStorage();
|
||||
|
||||
const handleReset = useCallback(() => {
|
||||
clearStorage();
|
||||
location.reload();
|
||||
return false;
|
||||
}, [clearStorage]);
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
|
||||
<ThemeLocaleProvider>
|
||||
<ThemeLocaleProvider>
|
||||
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
|
||||
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
|
||||
<AppContent />
|
||||
{!didStudioInit && <Loading />}
|
||||
</Box>
|
||||
<GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
|
||||
<GlobalModalIsolator />
|
||||
</ThemeLocaleProvider>
|
||||
</ErrorBoundary>
|
||||
</ErrorBoundary>
|
||||
</ThemeLocaleProvider>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
|
||||
import { setupListeners } from '@reduxjs/toolkit/query';
|
||||
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection';
|
||||
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
|
||||
import { useLogger } from 'app/logging/useLogger';
|
||||
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
|
||||
@@ -15,6 +16,8 @@ import { useDndMonitor } from 'features/dnd/useDndMonitor';
|
||||
import { useDynamicPromptsWatcher } from 'features/dynamicPrompts/hooks/useDynamicPromptsWatcher';
|
||||
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
|
||||
import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
|
||||
import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
|
||||
import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators';
|
||||
import { useReadinessWatcher } from 'features/queue/store/readiness';
|
||||
import { configChanged } from 'features/system/store/configSlice';
|
||||
import { selectLanguage } from 'features/system/store/systemSelectors';
|
||||
@@ -47,10 +50,13 @@ export const GlobalHookIsolator = memo(
|
||||
useCloseChakraTooltipsOnDragFix();
|
||||
useNavigationApi();
|
||||
useDndMonitor();
|
||||
useSyncNodeErrors();
|
||||
useSyncLangDirection();
|
||||
|
||||
// Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
|
||||
// and/or in progress canvas sessions.
|
||||
useGetQueueCountsByDestinationQuery(queueCountArg);
|
||||
useSyncExecutionState();
|
||||
|
||||
useEffect(() => {
|
||||
i18n.changeLanguage(language);
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
|
||||
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
|
||||
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
|
||||
import {
|
||||
NewCanvasSessionDialog,
|
||||
NewGallerySessionDialog,
|
||||
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
|
||||
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
|
||||
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
|
||||
@@ -50,8 +46,6 @@ export const GlobalModalIsolator = memo(() => {
|
||||
<RefreshAfterResetModal />
|
||||
<DeleteBoardModal />
|
||||
<GlobalImageHotkeys />
|
||||
<NewGallerySessionDialog />
|
||||
<NewCanvasSessionDialog />
|
||||
<ImageContextMenu />
|
||||
<FullscreenDropzone />
|
||||
<VideosModal />
|
||||
|
||||
@@ -5,6 +5,7 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
|
||||
import type { LoggingOverrides } from 'app/logging/logger';
|
||||
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
|
||||
import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
|
||||
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
|
||||
import { $authToken } from 'app/store/nanostores/authToken';
|
||||
import { $baseUrl } from 'app/store/nanostores/baseUrl';
|
||||
@@ -35,7 +36,7 @@ import {
|
||||
import type { WorkflowCategory } from 'features/nodes/types/workflow';
|
||||
import type { ToastConfig } from 'features/toast/toast';
|
||||
import type { PropsWithChildren, ReactNode } from 'react';
|
||||
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
|
||||
import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
|
||||
import { Provider } from 'react-redux';
|
||||
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
|
||||
import { $socketOptions } from 'services/events/stores';
|
||||
@@ -70,6 +71,7 @@ interface Props extends PropsWithChildren {
|
||||
* If provided, overrides in-app navigation to the model manager
|
||||
*/
|
||||
onClickGoToModelManager?: () => void;
|
||||
storagePersistDebounce?: number;
|
||||
}
|
||||
|
||||
const InvokeAIUI = ({
|
||||
@@ -96,7 +98,11 @@ const InvokeAIUI = ({
|
||||
loggingOverrides,
|
||||
onClickGoToModelManager,
|
||||
whatsNew,
|
||||
storagePersistDebounce = 300,
|
||||
}: Props) => {
|
||||
const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
|
||||
const [didRehydrate, setDidRehydrate] = useState(false);
|
||||
|
||||
useLayoutEffect(() => {
|
||||
/*
|
||||
* We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
|
||||
@@ -308,22 +314,30 @@ const InvokeAIUI = ({
|
||||
};
|
||||
}, [isDebugging]);
|
||||
|
||||
const store = useMemo(() => {
|
||||
return createStore(projectId);
|
||||
}, [projectId]);
|
||||
|
||||
useEffect(() => {
|
||||
const onRehydrated = () => {
|
||||
setDidRehydrate(true);
|
||||
};
|
||||
const store = createStore({ persist: true, persistDebounce: storagePersistDebounce, onRehydrated });
|
||||
setStore(store);
|
||||
$store.set(store);
|
||||
if (import.meta.env.MODE === 'development') {
|
||||
window.$store = $store;
|
||||
}
|
||||
() => {
|
||||
const removeStorageListeners = addStorageListeners();
|
||||
return () => {
|
||||
removeStorageListeners();
|
||||
setStore(undefined);
|
||||
$store.set(undefined);
|
||||
if (import.meta.env.MODE === 'development') {
|
||||
window.$store = undefined;
|
||||
}
|
||||
};
|
||||
}, [store]);
|
||||
}, [storagePersistDebounce]);
|
||||
|
||||
if (!store || !didRehydrate) {
|
||||
return <Loading />;
|
||||
}
|
||||
|
||||
return (
|
||||
<React.StrictMode>
|
||||
|
||||
@@ -3,43 +3,39 @@ import 'overlayscrollbars/overlayscrollbars.css';
|
||||
import '@xyflow/react/dist/base.css';
|
||||
import 'common/components/OverlayScrollbars/overlayscrollbars.css';
|
||||
|
||||
import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
|
||||
import { ChakraProvider, DarkMode, extendTheme, theme as baseTheme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { $direction } from 'app/hooks/useSyncLangDirection';
|
||||
import type { ReactNode } from 'react';
|
||||
import { memo, useEffect, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
type ThemeLocaleProviderProps = {
|
||||
children: ReactNode;
|
||||
};
|
||||
|
||||
const buildTheme = (direction: 'ltr' | 'rtl') => {
|
||||
return extendTheme({
|
||||
...baseTheme,
|
||||
direction,
|
||||
shadows: {
|
||||
...baseTheme.shadows,
|
||||
selected:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
hoverSelected:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
hoverUnselected:
|
||||
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
|
||||
selectedForCompare:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
hoverSelectedForCompare:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
|
||||
const { i18n } = useTranslation();
|
||||
|
||||
const direction = i18n.dir();
|
||||
|
||||
const theme = useMemo(() => {
|
||||
return extendTheme({
|
||||
..._theme,
|
||||
direction,
|
||||
shadows: {
|
||||
..._theme.shadows,
|
||||
selected:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
hoverSelected:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
hoverUnselected:
|
||||
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
|
||||
selectedForCompare:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
hoverSelectedForCompare:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
},
|
||||
});
|
||||
}, [direction]);
|
||||
|
||||
useEffect(() => {
|
||||
document.body.dir = direction;
|
||||
}, [direction]);
|
||||
const direction = useStore($direction);
|
||||
const theme = useMemo(() => buildTheme(direction), [direction]);
|
||||
|
||||
return (
|
||||
<ChakraProvider theme={theme} toastOptions={TOAST_OPTIONS}>
|
||||
|
||||
@@ -21,7 +21,6 @@ import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/st
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { navigationApi } from 'features/ui/layouts/navigation-api';
|
||||
import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
|
||||
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
|
||||
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
|
||||
import { atom } from 'nanostores';
|
||||
import { useCallback, useEffect } from 'react';
|
||||
@@ -165,7 +164,6 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
|
||||
// Go to the generate tab, open the launchpad
|
||||
await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
|
||||
store.dispatch(paramsReset());
|
||||
store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
break;
|
||||
case 'canvas':
|
||||
// Go to the canvas tab, open the launchpad
|
||||
|
||||
36
invokeai/frontend/web/src/app/hooks/useSyncLangDirection.ts
Normal file
36
invokeai/frontend/web/src/app/hooks/useSyncLangDirection.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import { atom } from 'nanostores';
|
||||
import { useEffect } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
/**
|
||||
* Global atom storing the language direction, to be consumed by the Chakra theme.
|
||||
*
|
||||
* Why do we need this? We have a kind of catch-22:
|
||||
* - The Chakra theme needs to know the language direction to apply the correct styles.
|
||||
* - The language direction is determined by i18n and the language selection.
|
||||
* - We want our error boundary to be themed.
|
||||
* - It's possible that i18n can throw if the language selection is invalid or not supported.
|
||||
*
|
||||
* Previously, we had the logic in this file in the theme provider, which wrapped the error boundary. The error
|
||||
* was properly themed. But then, if i18n threw in the theme provider, the error boundary does not catch the
|
||||
* error. The app would crash to a white screen.
|
||||
*
|
||||
* We tried swapping the component hierarchy so that the error boundary wraps the theme provider, but then the
|
||||
* error boundary isn't themed!
|
||||
*
|
||||
* The solution is to move this i18n direction logic out of the theme provider and into a hook that we can use
|
||||
* within the error boundary. The error boundary will be themed, _and_ catch any i18n errors.
|
||||
*/
|
||||
export const $direction = atom<'ltr' | 'rtl'>('ltr');
|
||||
|
||||
export const useSyncLangDirection = () => {
|
||||
useAssertSingleton('useSyncLangDirection');
|
||||
const { i18n, t } = useTranslation();
|
||||
|
||||
useEffect(() => {
|
||||
const direction = i18n.dir();
|
||||
$direction.set(direction);
|
||||
document.body.dir = direction;
|
||||
}, [i18n, t]);
|
||||
};
|
||||
@@ -2,7 +2,7 @@ import { createLogWriter } from '@roarr/browser-log-writer';
|
||||
import { atom } from 'nanostores';
|
||||
import type { Logger, MessageSerializer } from 'roarr';
|
||||
import { ROARR, Roarr } from 'roarr';
|
||||
import { z } from 'zod/v4';
|
||||
import { z } from 'zod';
|
||||
|
||||
const serializeMessage: MessageSerializer = (message) => {
|
||||
return JSON.stringify(message);
|
||||
@@ -93,5 +93,7 @@ export const configureLogging = (
|
||||
localStorage.setItem('ROARR_FILTER', filter);
|
||||
}
|
||||
|
||||
ROARR.write = createLogWriter();
|
||||
const styleOutput = localStorage.getItem('ROARR_STYLE_OUTPUT') === 'false' ? false : true;
|
||||
|
||||
ROARR.write = createLogWriter({ styleOutput });
|
||||
};
|
||||
|
||||
@@ -1,3 +1,2 @@
|
||||
export const STORAGE_PREFIX = '@@invokeai-';
|
||||
export const EMPTY_ARRAY = [];
|
||||
export const EMPTY_OBJECT = {};
|
||||
|
||||
@@ -1,40 +1,209 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
|
||||
import { $authToken } from 'app/store/nanostores/authToken';
|
||||
import { $projectId } from 'app/store/nanostores/projectId';
|
||||
import { $queueId } from 'app/store/nanostores/queueId';
|
||||
import type { UseStore } from 'idb-keyval';
|
||||
import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
|
||||
import { atom } from 'nanostores';
|
||||
import { createStore as idbCreateStore, del as idbDel, get as idbGet } from 'idb-keyval';
|
||||
import type { Driver } from 'redux-remember';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { buildV1Url, getBaseUrl } from 'services/api';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
// Create a custom idb-keyval store (just needed to customize the name)
|
||||
const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
|
||||
const log = logger('system');
|
||||
|
||||
export const clearIdbKeyValStore = () => {
|
||||
clear($idbKeyValStore.get());
|
||||
const getUrl = (endpoint: 'get_by_key' | 'set_by_key' | 'delete', key?: string) => {
|
||||
const baseUrl = getBaseUrl();
|
||||
const query: Record<string, string> = {};
|
||||
if (key) {
|
||||
query['key'] = key;
|
||||
}
|
||||
|
||||
const path = buildV1Url(`client_state/${$queueId.get()}/${endpoint}`, query);
|
||||
const url = `${baseUrl}/${path}`;
|
||||
return url;
|
||||
};
|
||||
|
||||
// Create redux-remember driver, wrapping idb-keyval
|
||||
export const idbKeyValDriver: Driver = {
|
||||
getItem: (key) => {
|
||||
try {
|
||||
return get(key, $idbKeyValStore.get());
|
||||
} catch (originalError) {
|
||||
throw new StorageError({
|
||||
key,
|
||||
projectId: $projectId.get(),
|
||||
originalError,
|
||||
});
|
||||
}
|
||||
},
|
||||
setItem: (key, value) => {
|
||||
try {
|
||||
return set(key, value, $idbKeyValStore.get());
|
||||
} catch (originalError) {
|
||||
throw new StorageError({
|
||||
key,
|
||||
value,
|
||||
projectId: $projectId.get(),
|
||||
originalError,
|
||||
});
|
||||
}
|
||||
},
|
||||
const getHeaders = () => {
|
||||
const headers = new Headers();
|
||||
const authToken = $authToken.get();
|
||||
const projectId = $projectId.get();
|
||||
if (authToken) {
|
||||
headers.set('Authorization', `Bearer ${authToken}`);
|
||||
}
|
||||
if (projectId) {
|
||||
headers.set('project-id', projectId);
|
||||
}
|
||||
return headers;
|
||||
};
|
||||
|
||||
// Persistence happens per slice. To track when persistence is in progress, maintain a ref count, incrementing
|
||||
// it when a slice is being persisted and decrementing it when the persistence is done.
|
||||
let persistRefCount = 0;
|
||||
|
||||
// Keep track of the last persisted state for each key to avoid unnecessary network requests.
|
||||
//
|
||||
// `redux-remember` persists individual slices of state, so we can implicity denylist a slice by not giving it a
|
||||
// persist config.
|
||||
//
|
||||
// However, we may need to avoid persisting individual _fields_ of a slice. `redux-remember` does not provide a
|
||||
// way to do this directly.
|
||||
//
|
||||
// To accomplish this, we add a layer of logic on top of the `redux-remember`. In the state serializer function
|
||||
// provided to `redux-remember`, we can omit certain fields from the state that we do not want to persist. See
|
||||
// the implementation in `store.ts` for this logic.
|
||||
//
|
||||
// This logic is unknown to `redux-remember`. When an omitted field changes, it will still attempt to persist the
|
||||
// whole slice, even if the final, _serialized_ slice value is unchanged.
|
||||
//
|
||||
// To avoid unnecessary network requests, we keep track of the last persisted state for each key in this map.
|
||||
// If the value to be persisted is the same as the last persisted value, we will skip the network request.
|
||||
const lastPersistedState = new Map<string, string | undefined>();
|
||||
|
||||
// As of v6.3.0, we use server-backed storage for client state. This replaces the previous IndexedDB-based storage,
|
||||
// which was implemented using `idb-keyval`.
|
||||
//
|
||||
// To facilitate a smooth transition, we implement a migration strategy that attempts to retrieve values from IndexedDB
|
||||
// and persist them to the new server-backed storage. This is done on a best-effort basis.
|
||||
|
||||
// These constants were used in the previous IndexedDB-based storage implementation.
|
||||
const IDB_DB_NAME = 'invoke';
|
||||
const IDB_STORE_NAME = 'invoke-store';
|
||||
const IDB_STORAGE_PREFIX = '@@invokeai-';
|
||||
|
||||
// Lazy store creation
|
||||
let _idbKeyValStore: UseStore | null = null;
|
||||
const getIdbKeyValStore = () => {
|
||||
if (_idbKeyValStore === null) {
|
||||
_idbKeyValStore = idbCreateStore(IDB_DB_NAME, IDB_STORE_NAME);
|
||||
}
|
||||
return _idbKeyValStore;
|
||||
};
|
||||
|
||||
const getIdbKey = (key: string) => {
|
||||
return `${IDB_STORAGE_PREFIX}${key}`;
|
||||
};
|
||||
|
||||
const getItem = async (key: string) => {
|
||||
try {
|
||||
const url = getUrl('get_by_key', key);
|
||||
const headers = getHeaders();
|
||||
const res = await fetch(url, { method: 'GET', headers });
|
||||
if (!res.ok) {
|
||||
throw new Error(`Response status: ${res.status}`);
|
||||
}
|
||||
const value = await res.json();
|
||||
|
||||
// Best-effort migration from IndexedDB to the new storage system
|
||||
log.trace({ key, value }, 'Server-backed storage value retrieved');
|
||||
|
||||
if (!value) {
|
||||
const idbKey = getIdbKey(key);
|
||||
try {
|
||||
// It's a bit tricky to query IndexedDB directly to check if value exists, so we use `idb-keyval` to do it.
|
||||
// Thing is, `idb-keyval` requires you to create a store to query it. End result - we are creating a store
|
||||
// even if we don't use it for anything besides checking if the key is present.
|
||||
const idbKeyValStore = getIdbKeyValStore();
|
||||
const idbValue = await idbGet(idbKey, idbKeyValStore);
|
||||
if (idbValue) {
|
||||
log.debug(
|
||||
{ key, idbKey, idbValue },
|
||||
'No value in server-backed storage, but found value in IndexedDB - attempting migration'
|
||||
);
|
||||
await idbDel(idbKey, idbKeyValStore);
|
||||
await setItem(key, idbValue);
|
||||
log.debug({ key, idbKey, idbValue }, 'Migration successful');
|
||||
return idbValue;
|
||||
}
|
||||
} catch (error) {
|
||||
// Just log if IndexedDB retrieval fails - this is a best-effort migration.
|
||||
log.debug(
|
||||
{ key, idbKey, error: serializeError(error) } as JsonObject,
|
||||
'Error checking for or migrating from IndexedDB'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
lastPersistedState.set(key, value);
|
||||
log.trace({ key, last: lastPersistedState.get(key), next: value }, `Getting state for ${key}`);
|
||||
return value;
|
||||
} catch (originalError) {
|
||||
throw new StorageError({
|
||||
key,
|
||||
projectId: $projectId.get(),
|
||||
originalError,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
const setItem = async (key: string, value: string) => {
|
||||
try {
|
||||
persistRefCount++;
|
||||
if (lastPersistedState.get(key) === value) {
|
||||
log.trace(
|
||||
{ key, last: lastPersistedState.get(key), next: value },
|
||||
`Skipping persist for ${key} as value is unchanged`
|
||||
);
|
||||
return value;
|
||||
}
|
||||
log.trace({ key, last: lastPersistedState.get(key), next: value }, `Persisting state for ${key}`);
|
||||
const url = getUrl('set_by_key', key);
|
||||
const headers = getHeaders();
|
||||
const res = await fetch(url, { method: 'POST', headers, body: value });
|
||||
if (!res.ok) {
|
||||
throw new Error(`Response status: ${res.status}`);
|
||||
}
|
||||
const resultValue = await res.json();
|
||||
lastPersistedState.set(key, resultValue);
|
||||
return resultValue;
|
||||
} catch (originalError) {
|
||||
throw new StorageError({
|
||||
key,
|
||||
value,
|
||||
projectId: $projectId.get(),
|
||||
originalError,
|
||||
});
|
||||
} finally {
|
||||
persistRefCount--;
|
||||
if (persistRefCount < 0) {
|
||||
log.trace('Persist ref count is negative, resetting to 0');
|
||||
persistRefCount = 0;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
export const reduxRememberDriver: Driver = { getItem, setItem };
|
||||
|
||||
export const clearStorage = async () => {
|
||||
try {
|
||||
persistRefCount++;
|
||||
const url = getUrl('delete');
|
||||
const headers = getHeaders();
|
||||
const res = await fetch(url, { method: 'POST', headers });
|
||||
if (!res.ok) {
|
||||
throw new Error(`Response status: ${res.status}`);
|
||||
}
|
||||
} catch {
|
||||
log.error('Failed to reset client state');
|
||||
} finally {
|
||||
persistRefCount--;
|
||||
lastPersistedState.clear();
|
||||
if (persistRefCount < 0) {
|
||||
log.trace('Persist ref count is negative, resetting to 0');
|
||||
persistRefCount = 0;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
export const addStorageListeners = () => {
|
||||
const onBeforeUnload = (e: BeforeUnloadEvent) => {
|
||||
if (persistRefCount > 0) {
|
||||
e.preventDefault();
|
||||
}
|
||||
};
|
||||
window.addEventListener('beforeunload', onBeforeUnload);
|
||||
|
||||
return () => {
|
||||
window.removeEventListener('beforeunload', onBeforeUnload);
|
||||
};
|
||||
};
|
||||
|
||||
@@ -33,8 +33,9 @@ export class StorageError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
const log = logger('system');
|
||||
|
||||
export const errorHandler = (err: PersistError | RehydrateError) => {
|
||||
const log = logger('system');
|
||||
if (err instanceof PersistError) {
|
||||
log.error({ error: serializeError(err) }, 'Problem persisting state');
|
||||
} else if (err instanceof RehydrateError) {
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
import type { TypedStartListening } from '@reduxjs/toolkit';
|
||||
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
|
||||
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
|
||||
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
|
||||
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
|
||||
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
||||
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
|
||||
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
|
||||
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
|
||||
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
|
||||
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
|
||||
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
|
||||
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
|
||||
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
|
||||
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
|
||||
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
|
||||
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
|
||||
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
|
||||
import { addArchivedOrDeletedBoardListener } from './listeners/addArchivedOrDeletedBoardListener';
|
||||
|
||||
export const listenerMiddleware = createListenerMiddleware();
|
||||
|
||||
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
||||
|
||||
const startAppListening = listenerMiddleware.startListening as AppStartListening;
|
||||
|
||||
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
|
||||
|
||||
/**
|
||||
* The RTK listener middleware is a lightweight alternative sagas/observables.
|
||||
*
|
||||
* Most side effect logic should live in a listener.
|
||||
*/
|
||||
|
||||
// Image uploaded
|
||||
addImageUploadedFulfilledListener(startAppListening);
|
||||
|
||||
// Image deleted
|
||||
addDeleteBoardAndImagesFulfilledListener(startAppListening);
|
||||
|
||||
// User Invoked
|
||||
addAnyEnqueuedListener(startAppListening);
|
||||
addBatchEnqueuedListener(startAppListening);
|
||||
|
||||
// Socket.IO
|
||||
addSocketConnectedEventListener(startAppListening);
|
||||
|
||||
// Gallery bulk download
|
||||
addBulkDownloadListeners(startAppListening);
|
||||
|
||||
// Boards
|
||||
addImageAddedToBoardFulfilledListener(startAppListening);
|
||||
addImageRemovedFromBoardFulfilledListener(startAppListening);
|
||||
addBoardIdSelectedListener(startAppListening);
|
||||
addArchivedOrDeletedBoardListener(startAppListening);
|
||||
|
||||
// Node schemas
|
||||
addGetOpenAPISchemaListener(startAppListening);
|
||||
|
||||
// Models
|
||||
addModelSelectedListener(startAppListening);
|
||||
|
||||
// app startup
|
||||
addAppStartedListener(startAppListening);
|
||||
addModelsLoadedListener(startAppListening);
|
||||
addAppConfigReceivedListener(startAppListening);
|
||||
|
||||
// Ad-hoc upscale workflwo
|
||||
addAdHocPostProcessingRequestedListener(startAppListening);
|
||||
|
||||
addSetDefaultSettingsListener(startAppListening);
|
||||
@@ -1,6 +1,6 @@
|
||||
import { createAction } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
import {
|
||||
autoAddBoardIdChanged,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
|
||||
|
||||
export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
|
||||
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
|
||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { createAction } from '@reduxjs/toolkit';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
|
||||
import { imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { truncate } from 'es-toolkit/compat';
|
||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||
import { toast } from 'features/toast/toast';
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import { getImageUsage } from 'features/deleteImageModal/store/state';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
|
||||
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { size } from 'es-toolkit/compat';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
const log = logger('gallery');
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
const log = logger('gallery');
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { RootState } from 'app/store/store';
|
||||
import type { AppStartListening, RootState } from 'app/store/store';
|
||||
import { omit } from 'es-toolkit/compat';
|
||||
import { imageUploadedClientSide } from 'features/gallery/store/actions';
|
||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
||||
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
|
||||
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
|
||||
@@ -152,7 +152,8 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
|
||||
if (modelBase !== state.params.model?.base) {
|
||||
// Sync generate tab settings whenever the model base changes
|
||||
dispatch(syncedToOptimalDimension());
|
||||
if (!selectIsStaging(state)) {
|
||||
const isStaging = buildSelectIsStaging(selectCanvasSessionId(state))(state);
|
||||
if (!isStaging) {
|
||||
// Canvas tab only syncs if not staging
|
||||
dispatch(bboxSyncedToOptimalDimension());
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
import type { AppDispatch, AppStartListening, RootState } from 'app/store/store';
|
||||
import { controlLayerModelChanged, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
|
||||
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
|
||||
import {
|
||||
@@ -15,7 +14,11 @@ import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLaye
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
|
||||
import { modelSelected } from 'features/parameters/store/actions';
|
||||
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
|
||||
import {
|
||||
postProcessingModelChanged,
|
||||
tileControlnetModelChanged,
|
||||
upscaleModelChanged,
|
||||
} from 'features/parameters/store/upscaleSlice';
|
||||
import {
|
||||
zParameterCLIPEmbedModel,
|
||||
zParameterSpandrelImageToImageModel,
|
||||
@@ -28,6 +31,7 @@ import type { AnyModelConfig } from 'services/api/types';
|
||||
import {
|
||||
isCLIPEmbedModelConfig,
|
||||
isControlLayerModelConfig,
|
||||
isControlNetModelConfig,
|
||||
isFluxReduxModelConfig,
|
||||
isFluxVAEModelConfig,
|
||||
isIPAdapterModelConfig,
|
||||
@@ -71,6 +75,7 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
|
||||
handleControlAdapterModels(models, state, dispatch, log);
|
||||
handlePostProcessingModel(models, state, dispatch, log);
|
||||
handleUpscaleModel(models, state, dispatch, log);
|
||||
handleTileControlNetModel(models, state, dispatch, log);
|
||||
handleIPAdapterModels(models, state, dispatch, log);
|
||||
handleT5EncoderModels(models, state, dispatch, log);
|
||||
handleCLIPEmbedModels(models, state, dispatch, log);
|
||||
@@ -345,6 +350,46 @@ const handleUpscaleModel: ModelHandler = (models, state, dispatch, log) => {
|
||||
}
|
||||
};
|
||||
|
||||
const handleTileControlNetModel: ModelHandler = (models, state, dispatch, log) => {
|
||||
const selectedTileControlNetModel = state.upscale.tileControlnetModel;
|
||||
const controlNetModels = models.filter(isControlNetModelConfig);
|
||||
|
||||
// If the currently selected model is available, we don't need to do anything
|
||||
if (selectedTileControlNetModel && controlNetModels.some((m) => m.key === selectedTileControlNetModel.key)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// The only way we have to identify a model as a tile model is by its name containing 'tile' :)
|
||||
const tileModel = controlNetModels.find((m) => m.name.toLowerCase().includes('tile'));
|
||||
|
||||
// If we have a tile model, select it
|
||||
if (tileModel) {
|
||||
log.debug(
|
||||
{ selectedTileControlNetModel, tileModel },
|
||||
'No selected tile ControlNet model or selected model is not available, selecting tile model'
|
||||
);
|
||||
dispatch(tileControlnetModelChanged(tileModel));
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise, select the first available ControlNet model
|
||||
const firstModel = controlNetModels[0] || null;
|
||||
if (firstModel) {
|
||||
log.debug(
|
||||
{ selectedTileControlNetModel, firstModel },
|
||||
'No tile ControlNet model found, selecting first available ControlNet model'
|
||||
);
|
||||
dispatch(tileControlnetModelChanged(firstModel));
|
||||
return;
|
||||
}
|
||||
|
||||
// No available models, we should clear the selected model - but only if we have one selected
|
||||
if (selectedTileControlNetModel) {
|
||||
log.debug({ selectedTileControlNetModel }, 'Selected tile ControlNet model is not available, clearing');
|
||||
dispatch(tileControlnetModelChanged(null));
|
||||
}
|
||||
};
|
||||
|
||||
const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
const selectedT5EncoderModel = state.params.t5EncoderModel;
|
||||
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { isNil } from 'es-toolkit';
|
||||
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import {
|
||||
heightChanged,
|
||||
setCfgRescaleMultiplier,
|
||||
@@ -115,7 +115,8 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
|
||||
}
|
||||
const setSizeOptions = { updateAspectRatio: true, clamp: true };
|
||||
|
||||
const isStaging = selectIsStaging(getState());
|
||||
const isStaging = buildSelectIsStaging(selectCanvasSessionId(state))(state);
|
||||
|
||||
const activeTab = selectActiveTab(getState());
|
||||
if (activeTab === 'generate') {
|
||||
if (isParameterWidth(width)) {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { objectEquals } from '@observ33r/object-equals';
|
||||
import { createAction } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { $baseUrl } from 'app/store/nanostores/baseUrl';
|
||||
import type { AppStartListening } from 'app/store/store';
|
||||
import { atom } from 'nanostores';
|
||||
import { api } from 'services/api';
|
||||
import { modelsApi } from 'services/api/endpoints/models';
|
||||
|
||||
@@ -1,194 +1,219 @@
|
||||
import type { ThunkDispatch, UnknownAction } from '@reduxjs/toolkit';
|
||||
import { autoBatchEnhancer, combineReducers, configureStore } from '@reduxjs/toolkit';
|
||||
import type { ThunkDispatch, TypedStartListening, UnknownAction } from '@reduxjs/toolkit';
|
||||
import { addListener, combineReducers, configureStore, createAction, createListenerMiddleware } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { idbKeyValDriver } from 'app/store/enhancers/reduxRemember/driver';
|
||||
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
|
||||
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
|
||||
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
|
||||
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
|
||||
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
||||
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
|
||||
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
|
||||
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
|
||||
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
|
||||
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
|
||||
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
|
||||
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
|
||||
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
|
||||
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
|
||||
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
|
||||
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { keys, mergeWith, omit, pick } from 'es-toolkit/compat';
|
||||
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
|
||||
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
|
||||
import {
|
||||
canvasSessionSlice,
|
||||
canvasStagingAreaPersistConfig,
|
||||
} from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
|
||||
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
|
||||
import { refImagesPersistConfig, refImagesSlice } from 'features/controlLayers/store/refImagesSlice';
|
||||
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
|
||||
import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
||||
import { nodesPersistConfig, nodesSlice, nodesUndoableConfig } from 'features/nodes/store/nodesSlice';
|
||||
import { workflowLibraryPersistConfig, workflowLibrarySlice } from 'features/nodes/store/workflowLibrarySlice';
|
||||
import { workflowSettingsPersistConfig, workflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { upscalePersistConfig, upscaleSlice } from 'features/parameters/store/upscaleSlice';
|
||||
import { queueSlice } from 'features/queue/store/queueSlice';
|
||||
import { stylePresetPersistConfig, stylePresetSlice } from 'features/stylePresets/store/stylePresetSlice';
|
||||
import { configSlice } from 'features/system/store/configSlice';
|
||||
import { systemPersistConfig, systemSlice } from 'features/system/store/systemSlice';
|
||||
import { uiPersistConfig, uiSlice } from 'features/ui/store/uiSlice';
|
||||
import { changeBoardModalSliceConfig } from 'features/changeBoardModal/store/slice';
|
||||
import { canvasSettingsSliceConfig } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import { canvasSliceConfig } from 'features/controlLayers/store/canvasSlice';
|
||||
import { canvasSessionSliceConfig } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import { lorasSliceConfig } from 'features/controlLayers/store/lorasSlice';
|
||||
import { paramsSliceConfig } from 'features/controlLayers/store/paramsSlice';
|
||||
import { refImagesSliceConfig } from 'features/controlLayers/store/refImagesSlice';
|
||||
import { dynamicPromptsSliceConfig } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { gallerySliceConfig } from 'features/gallery/store/gallerySlice';
|
||||
import { modelManagerSliceConfig } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
||||
import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
|
||||
import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
|
||||
import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
|
||||
import { queueSliceConfig } from 'features/queue/store/queueSlice';
|
||||
import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
|
||||
import { configSliceConfig } from 'features/system/store/configSlice';
|
||||
import { systemSliceConfig } from 'features/system/store/systemSlice';
|
||||
import { uiSliceConfig } from 'features/ui/store/uiSlice';
|
||||
import { diff } from 'jsondiffpatch';
|
||||
import dynamicMiddlewares from 'redux-dynamic-middlewares';
|
||||
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
|
||||
import { rememberEnhancer, rememberReducer } from 'redux-remember';
|
||||
import undoable from 'redux-undo';
|
||||
import { REMEMBER_REHYDRATED, rememberEnhancer, rememberReducer } from 'redux-remember';
|
||||
import undoable, { newHistory } from 'redux-undo';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { api } from 'services/api';
|
||||
import { authToastMiddleware } from 'services/api/authToastMiddleware';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
import { STORAGE_PREFIX } from './constants';
|
||||
import { reduxRememberDriver } from './enhancers/reduxRemember/driver';
|
||||
import { actionSanitizer } from './middleware/devtools/actionSanitizer';
|
||||
import { actionsDenylist } from './middleware/devtools/actionsDenylist';
|
||||
import { stateSanitizer } from './middleware/devtools/stateSanitizer';
|
||||
import { listenerMiddleware } from './middleware/listenerMiddleware';
|
||||
import { addArchivedOrDeletedBoardListener } from './middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener';
|
||||
import { addImageUploadedFulfilledListener } from './middleware/listenerMiddleware/listeners/imageUploaded';
|
||||
|
||||
export const listenerMiddleware = createListenerMiddleware();
|
||||
|
||||
const log = logger('system');
|
||||
|
||||
const allReducers = {
|
||||
[api.reducerPath]: api.reducer,
|
||||
[gallerySlice.name]: gallerySlice.reducer,
|
||||
[nodesSlice.name]: undoable(nodesSlice.reducer, nodesUndoableConfig),
|
||||
[systemSlice.name]: systemSlice.reducer,
|
||||
[configSlice.name]: configSlice.reducer,
|
||||
[uiSlice.name]: uiSlice.reducer,
|
||||
[dynamicPromptsSlice.name]: dynamicPromptsSlice.reducer,
|
||||
[changeBoardModalSlice.name]: changeBoardModalSlice.reducer,
|
||||
[modelManagerV2Slice.name]: modelManagerV2Slice.reducer,
|
||||
[queueSlice.name]: queueSlice.reducer,
|
||||
[canvasSlice.name]: undoable(canvasSlice.reducer, canvasUndoableConfig),
|
||||
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
|
||||
[upscaleSlice.name]: upscaleSlice.reducer,
|
||||
[stylePresetSlice.name]: stylePresetSlice.reducer,
|
||||
[paramsSlice.name]: paramsSlice.reducer,
|
||||
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
|
||||
[canvasSessionSlice.name]: canvasSessionSlice.reducer,
|
||||
[lorasSlice.name]: lorasSlice.reducer,
|
||||
[workflowLibrarySlice.name]: workflowLibrarySlice.reducer,
|
||||
[refImagesSlice.name]: refImagesSlice.reducer,
|
||||
// When adding a slice, add the config to the SLICE_CONFIGS object below, then add the reducer to ALL_REDUCERS.
|
||||
const SLICE_CONFIGS = {
|
||||
[canvasSessionSliceConfig.slice.reducerPath]: canvasSessionSliceConfig,
|
||||
[canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig,
|
||||
[canvasSliceConfig.slice.reducerPath]: canvasSliceConfig,
|
||||
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig,
|
||||
[configSliceConfig.slice.reducerPath]: configSliceConfig,
|
||||
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig,
|
||||
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig,
|
||||
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig,
|
||||
[modelManagerSliceConfig.slice.reducerPath]: modelManagerSliceConfig,
|
||||
[nodesSliceConfig.slice.reducerPath]: nodesSliceConfig,
|
||||
[paramsSliceConfig.slice.reducerPath]: paramsSliceConfig,
|
||||
[queueSliceConfig.slice.reducerPath]: queueSliceConfig,
|
||||
[refImagesSliceConfig.slice.reducerPath]: refImagesSliceConfig,
|
||||
[stylePresetSliceConfig.slice.reducerPath]: stylePresetSliceConfig,
|
||||
[systemSliceConfig.slice.reducerPath]: systemSliceConfig,
|
||||
[uiSliceConfig.slice.reducerPath]: uiSliceConfig,
|
||||
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig,
|
||||
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig,
|
||||
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig,
|
||||
};
|
||||
|
||||
const rootReducer = combineReducers(allReducers);
|
||||
// TS makes it really hard to dynamically create this object :/ so it's just hardcoded here.
|
||||
// Remember to wrap undoable reducers in `undoable()`!
|
||||
const ALL_REDUCERS = {
|
||||
[api.reducerPath]: api.reducer,
|
||||
[canvasSessionSliceConfig.slice.reducerPath]: canvasSessionSliceConfig.slice.reducer,
|
||||
[canvasSettingsSliceConfig.slice.reducerPath]: canvasSettingsSliceConfig.slice.reducer,
|
||||
// Undoable!
|
||||
[canvasSliceConfig.slice.reducerPath]: undoable(
|
||||
canvasSliceConfig.slice.reducer,
|
||||
canvasSliceConfig.undoableConfig?.reduxUndoOptions
|
||||
),
|
||||
[changeBoardModalSliceConfig.slice.reducerPath]: changeBoardModalSliceConfig.slice.reducer,
|
||||
[configSliceConfig.slice.reducerPath]: configSliceConfig.slice.reducer,
|
||||
[dynamicPromptsSliceConfig.slice.reducerPath]: dynamicPromptsSliceConfig.slice.reducer,
|
||||
[gallerySliceConfig.slice.reducerPath]: gallerySliceConfig.slice.reducer,
|
||||
[lorasSliceConfig.slice.reducerPath]: lorasSliceConfig.slice.reducer,
|
||||
[modelManagerSliceConfig.slice.reducerPath]: modelManagerSliceConfig.slice.reducer,
|
||||
// Undoable!
|
||||
[nodesSliceConfig.slice.reducerPath]: undoable(
|
||||
nodesSliceConfig.slice.reducer,
|
||||
nodesSliceConfig.undoableConfig?.reduxUndoOptions
|
||||
),
|
||||
[paramsSliceConfig.slice.reducerPath]: paramsSliceConfig.slice.reducer,
|
||||
[queueSliceConfig.slice.reducerPath]: queueSliceConfig.slice.reducer,
|
||||
[refImagesSliceConfig.slice.reducerPath]: refImagesSliceConfig.slice.reducer,
|
||||
[stylePresetSliceConfig.slice.reducerPath]: stylePresetSliceConfig.slice.reducer,
|
||||
[systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer,
|
||||
[uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer,
|
||||
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer,
|
||||
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer,
|
||||
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer,
|
||||
};
|
||||
|
||||
const rootReducer = combineReducers(ALL_REDUCERS);
|
||||
|
||||
const rememberedRootReducer = rememberReducer(rootReducer);
|
||||
|
||||
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
|
||||
export type PersistConfig<T = any> = {
|
||||
/**
|
||||
* The name of the slice.
|
||||
*/
|
||||
name: keyof typeof allReducers;
|
||||
/**
|
||||
* The initial state of the slice.
|
||||
*/
|
||||
initialState: T;
|
||||
/**
|
||||
* Migrate the state to the current version during rehydration.
|
||||
* @param state The rehydrated state.
|
||||
* @returns A correctly-shaped state.
|
||||
*/
|
||||
migrate: (state: unknown) => T;
|
||||
/**
|
||||
* Keys to omit from the persisted state.
|
||||
*/
|
||||
persistDenylist: (keyof T)[];
|
||||
};
|
||||
|
||||
const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||
[galleryPersistConfig.name]: galleryPersistConfig,
|
||||
[nodesPersistConfig.name]: nodesPersistConfig,
|
||||
[systemPersistConfig.name]: systemPersistConfig,
|
||||
[uiPersistConfig.name]: uiPersistConfig,
|
||||
[dynamicPromptsPersistConfig.name]: dynamicPromptsPersistConfig,
|
||||
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
|
||||
[canvasPersistConfig.name]: canvasPersistConfig,
|
||||
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
|
||||
[upscalePersistConfig.name]: upscalePersistConfig,
|
||||
[stylePresetPersistConfig.name]: stylePresetPersistConfig,
|
||||
[paramsPersistConfig.name]: paramsPersistConfig,
|
||||
[canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
|
||||
[canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
|
||||
[lorasPersistConfig.name]: lorasPersistConfig,
|
||||
[workflowLibraryPersistConfig.name]: workflowLibraryPersistConfig,
|
||||
[refImagesSlice.name]: refImagesPersistConfig,
|
||||
};
|
||||
|
||||
const unserialize: UnserializeFunction = (data, key) => {
|
||||
const persistConfig = persistConfigs[key as keyof typeof persistConfigs];
|
||||
if (!persistConfig) {
|
||||
const sliceConfig = SLICE_CONFIGS[key as keyof typeof SLICE_CONFIGS];
|
||||
if (!sliceConfig?.persistConfig) {
|
||||
throw new Error(`No persist config for slice "${key}"`);
|
||||
}
|
||||
const { getInitialState, persistConfig, undoableConfig } = sliceConfig;
|
||||
let state;
|
||||
try {
|
||||
const { initialState, migrate } = persistConfig;
|
||||
const initialState = getInitialState();
|
||||
const parsed = JSON.parse(data);
|
||||
|
||||
// strip out old keys
|
||||
const stripped = pick(deepClone(parsed), keys(initialState));
|
||||
// run (additive) migrations
|
||||
const migrated = migrate(stripped);
|
||||
/*
|
||||
* Merge in initial state as default values, covering any missing keys. You might be tempted to use _.defaultsDeep,
|
||||
* but that merges arrays by index and partial objects by key. Using an identity function as the customizer results
|
||||
* in behaviour like defaultsDeep, but doesn't overwrite any values that are not undefined in the migrated state.
|
||||
*/
|
||||
const transformed = mergeWith(migrated, initialState, (objVal) => objVal);
|
||||
const unPersistDenylisted = mergeWith(stripped, initialState, (objVal) => objVal);
|
||||
// run (additive) migrations
|
||||
const migrated = persistConfig.migrate(unPersistDenylisted);
|
||||
|
||||
log.debug(
|
||||
{
|
||||
persistedData: parsed,
|
||||
rehydratedData: transformed,
|
||||
diff: diff(parsed, transformed) as JsonObject, // this is always serializable
|
||||
persistedData: parsed as JsonObject,
|
||||
rehydratedData: migrated as JsonObject,
|
||||
diff: diff(data, migrated) as JsonObject,
|
||||
},
|
||||
`Rehydrated slice "${key}"`
|
||||
);
|
||||
return transformed;
|
||||
state = migrated;
|
||||
} catch (err) {
|
||||
log.warn(
|
||||
{ error: serializeError(err as Error) },
|
||||
`Error rehydrating slice "${key}", falling back to default initial state`
|
||||
);
|
||||
return persistConfig.initialState;
|
||||
state = getInitialState();
|
||||
}
|
||||
|
||||
// Undoable slices must be wrapped in a history!
|
||||
if (undoableConfig) {
|
||||
return newHistory([], state, []);
|
||||
} else {
|
||||
return state;
|
||||
}
|
||||
};
|
||||
|
||||
const serialize: SerializeFunction = (data, key) => {
|
||||
const persistConfig = persistConfigs[key as keyof typeof persistConfigs];
|
||||
if (!persistConfig) {
|
||||
const sliceConfig = SLICE_CONFIGS[key as keyof typeof SLICE_CONFIGS];
|
||||
if (!sliceConfig?.persistConfig) {
|
||||
throw new Error(`No persist config for slice "${key}"`);
|
||||
}
|
||||
// Heuristic to determine if the slice is undoable - could just hardcode it in the persistConfig
|
||||
const isUndoable = 'present' in data && 'past' in data && 'future' in data && '_latestUnfiltered' in data;
|
||||
const result = omit(isUndoable ? data.present : data, persistConfig.persistDenylist);
|
||||
|
||||
const result = omit(
|
||||
sliceConfig.undoableConfig ? data.present : data,
|
||||
sliceConfig.persistConfig.persistDenylist ?? []
|
||||
);
|
||||
|
||||
return JSON.stringify(result);
|
||||
};
|
||||
|
||||
export const createStore = (uniqueStoreKey?: string, persist = true) =>
|
||||
configureStore({
|
||||
const PERSISTED_KEYS = Object.values(SLICE_CONFIGS)
|
||||
.filter((sliceConfig) => !!sliceConfig.persistConfig)
|
||||
.map((sliceConfig) => sliceConfig.slice.reducerPath);
|
||||
|
||||
export const createStore = (options?: { persist?: boolean; persistDebounce?: number; onRehydrated?: () => void }) => {
|
||||
const store = configureStore({
|
||||
reducer: rememberedRootReducer,
|
||||
middleware: (getDefaultMiddleware) =>
|
||||
getDefaultMiddleware({
|
||||
// serializableCheck: false,
|
||||
// immutableCheck: false,
|
||||
serializableCheck: import.meta.env.MODE === 'development',
|
||||
immutableCheck: import.meta.env.MODE === 'development',
|
||||
})
|
||||
.concat(api.middleware)
|
||||
.concat(dynamicMiddlewares)
|
||||
.concat(authToastMiddleware)
|
||||
// .concat(getDebugLoggerMiddleware())
|
||||
// .concat(getDebugLoggerMiddleware({ withDiff: true, withNextState: true }))
|
||||
.prepend(listenerMiddleware.middleware),
|
||||
enhancers: (getDefaultEnhancers) => {
|
||||
const _enhancers = getDefaultEnhancers().concat(autoBatchEnhancer());
|
||||
if (persist) {
|
||||
_enhancers.push(
|
||||
rememberEnhancer(idbKeyValDriver, keys(persistConfigs), {
|
||||
persistDebounce: 300,
|
||||
const enhancers = getDefaultEnhancers();
|
||||
if (options?.persist) {
|
||||
return enhancers.prepend(
|
||||
rememberEnhancer(reduxRememberDriver, PERSISTED_KEYS, {
|
||||
persistDebounce: options?.persistDebounce ?? 2000,
|
||||
serialize,
|
||||
unserialize,
|
||||
prefix: uniqueStoreKey ? `${STORAGE_PREFIX}${uniqueStoreKey}-` : STORAGE_PREFIX,
|
||||
prefix: '',
|
||||
errorHandler,
|
||||
})
|
||||
);
|
||||
} else {
|
||||
return enhancers;
|
||||
}
|
||||
return _enhancers;
|
||||
},
|
||||
devTools: {
|
||||
actionSanitizer,
|
||||
@@ -203,9 +228,62 @@ export const createStore = (uniqueStoreKey?: string, persist = true) =>
|
||||
},
|
||||
});
|
||||
|
||||
// Once-off listener to support waiting for rehydration before rendering the app
|
||||
startAppListening({
|
||||
actionCreator: createAction(REMEMBER_REHYDRATED),
|
||||
effect: (action, { unsubscribe }) => {
|
||||
unsubscribe();
|
||||
options?.onRehydrated?.();
|
||||
},
|
||||
});
|
||||
|
||||
return store;
|
||||
};
|
||||
|
||||
export type AppStore = ReturnType<typeof createStore>;
|
||||
export type RootState = ReturnType<AppStore['getState']>;
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
|
||||
export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>;
|
||||
export type AppDispatch = ReturnType<typeof createStore>['dispatch'];
|
||||
export type AppGetState = ReturnType<typeof createStore>['getState'];
|
||||
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
|
||||
|
||||
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
|
||||
|
||||
const startAppListening = listenerMiddleware.startListening as AppStartListening;
|
||||
addImageUploadedFulfilledListener(startAppListening);
|
||||
|
||||
// Image deleted
|
||||
addDeleteBoardAndImagesFulfilledListener(startAppListening);
|
||||
|
||||
// User Invoked
|
||||
addAnyEnqueuedListener(startAppListening);
|
||||
addBatchEnqueuedListener(startAppListening);
|
||||
|
||||
// Socket.IO
|
||||
addSocketConnectedEventListener(startAppListening);
|
||||
|
||||
// Gallery bulk download
|
||||
addBulkDownloadListeners(startAppListening);
|
||||
|
||||
// Boards
|
||||
addImageAddedToBoardFulfilledListener(startAppListening);
|
||||
addImageRemovedFromBoardFulfilledListener(startAppListening);
|
||||
addBoardIdSelectedListener(startAppListening);
|
||||
addArchivedOrDeletedBoardListener(startAppListening);
|
||||
|
||||
// Node schemas
|
||||
addGetOpenAPISchemaListener(startAppListening);
|
||||
|
||||
// Models
|
||||
addModelSelectedListener(startAppListening);
|
||||
|
||||
// app startup
|
||||
addAppStartedListener(startAppListening);
|
||||
addModelsLoadedListener(startAppListening);
|
||||
addAppConfigReceivedListener(startAppListening);
|
||||
|
||||
// Ad-hoc upscale workflwo
|
||||
addAdHocPostProcessingRequestedListener(startAppListening);
|
||||
|
||||
addSetDefaultSettingsListener(startAppListening);
|
||||
|
||||
46
invokeai/frontend/web/src/app/store/types.ts
Normal file
46
invokeai/frontend/web/src/app/store/types.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import type { Slice } from '@reduxjs/toolkit';
|
||||
import type { UndoableOptions } from 'redux-undo';
|
||||
import type { ZodType } from 'zod';
|
||||
|
||||
type StateFromSlice<T extends Slice> = T extends Slice<infer U> ? U : never;
|
||||
|
||||
export type SliceConfig<T extends Slice> = {
|
||||
/**
|
||||
* The redux slice (return of createSlice).
|
||||
*/
|
||||
slice: T;
|
||||
/**
|
||||
* The zod schema for the slice.
|
||||
*/
|
||||
schema: ZodType<StateFromSlice<T>>;
|
||||
/**
|
||||
* A function that returns the initial state of the slice.
|
||||
*/
|
||||
getInitialState: () => StateFromSlice<T>;
|
||||
/**
|
||||
* The optional persist configuration for this slice. If omitted, the slice will not be persisted.
|
||||
*/
|
||||
persistConfig?: {
|
||||
/**
|
||||
* Migrate the state to the current version during rehydration. This method should throw an error if the migration
|
||||
* fails.
|
||||
*
|
||||
* @param state The rehydrated state.
|
||||
* @returns A correctly-shaped state.
|
||||
*/
|
||||
migrate: (state: unknown) => StateFromSlice<T>;
|
||||
/**
|
||||
* Keys to omit from the persisted state.
|
||||
*/
|
||||
persistDenylist?: (keyof StateFromSlice<T>)[];
|
||||
};
|
||||
/**
|
||||
* The optional undoable configuration for this slice. If omitted, the slice will not be undoable.
|
||||
*/
|
||||
undoableConfig?: {
|
||||
/**
|
||||
* The options to be passed into redux-undo.
|
||||
*/
|
||||
reduxUndoOptions: UndoableOptions<StateFromSlice<T>>;
|
||||
};
|
||||
};
|
||||
@@ -1,130 +1,299 @@
|
||||
import type { FilterType } from 'features/controlLayers/store/filters';
|
||||
import type { ParameterPrecision, ParameterScheduler } from 'features/parameters/types/parameterSchemas';
|
||||
import type { TabName } from 'features/ui/store/uiTypes';
|
||||
import { zFilterType } from 'features/controlLayers/store/filters';
|
||||
import { zParameterPrecision, zParameterScheduler } from 'features/parameters/types/parameterSchemas';
|
||||
import { zTabName } from 'features/ui/store/uiTypes';
|
||||
import type { PartialDeep } from 'type-fest';
|
||||
import z from 'zod';
|
||||
|
||||
/**
|
||||
* A disable-able application feature
|
||||
*/
|
||||
export type AppFeature =
|
||||
| 'faceRestore'
|
||||
| 'upscaling'
|
||||
| 'lightbox'
|
||||
| 'modelManager'
|
||||
| 'githubLink'
|
||||
| 'discordLink'
|
||||
| 'bugLink'
|
||||
| 'aboutModal'
|
||||
| 'localization'
|
||||
| 'consoleLogging'
|
||||
| 'dynamicPrompting'
|
||||
| 'batches'
|
||||
| 'syncModels'
|
||||
| 'multiselect'
|
||||
| 'pauseQueue'
|
||||
| 'resumeQueue'
|
||||
| 'invocationCache'
|
||||
| 'modelCache'
|
||||
| 'bulkDownload'
|
||||
| 'starterModels'
|
||||
| 'hfToken'
|
||||
| 'retryQueueItem'
|
||||
| 'cancelAndClearAll'
|
||||
| 'chatGPT4oHigh'
|
||||
| 'modelRelationships';
|
||||
/**
|
||||
* A disable-able Stable Diffusion feature
|
||||
*/
|
||||
export type SDFeature =
|
||||
| 'controlNet'
|
||||
| 'noise'
|
||||
| 'perlinNoise'
|
||||
| 'noiseThreshold'
|
||||
| 'variation'
|
||||
| 'symmetry'
|
||||
| 'seamless'
|
||||
| 'hires'
|
||||
| 'lora'
|
||||
| 'embedding'
|
||||
| 'vae'
|
||||
| 'hrf';
|
||||
const zAppFeature = z.enum([
|
||||
'faceRestore',
|
||||
'upscaling',
|
||||
'lightbox',
|
||||
'modelManager',
|
||||
'githubLink',
|
||||
'discordLink',
|
||||
'bugLink',
|
||||
'aboutModal',
|
||||
'localization',
|
||||
'consoleLogging',
|
||||
'dynamicPrompting',
|
||||
'batches',
|
||||
'syncModels',
|
||||
'multiselect',
|
||||
'pauseQueue',
|
||||
'resumeQueue',
|
||||
'invocationCache',
|
||||
'modelCache',
|
||||
'bulkDownload',
|
||||
'starterModels',
|
||||
'hfToken',
|
||||
'retryQueueItem',
|
||||
'cancelAndClearAll',
|
||||
'chatGPT4oHigh',
|
||||
'modelRelationships',
|
||||
]);
|
||||
export type AppFeature = z.infer<typeof zAppFeature>;
|
||||
|
||||
export type NumericalParameterConfig = {
|
||||
initial: number;
|
||||
sliderMin: number;
|
||||
sliderMax: number;
|
||||
numberInputMin: number;
|
||||
numberInputMax: number;
|
||||
fineStep: number;
|
||||
coarseStep: number;
|
||||
};
|
||||
const zSDFeature = z.enum([
|
||||
'controlNet',
|
||||
'noise',
|
||||
'perlinNoise',
|
||||
'noiseThreshold',
|
||||
'variation',
|
||||
'symmetry',
|
||||
'seamless',
|
||||
'hires',
|
||||
'lora',
|
||||
'embedding',
|
||||
'vae',
|
||||
'hrf',
|
||||
]);
|
||||
export type SDFeature = z.infer<typeof zSDFeature>;
|
||||
|
||||
const zNumericalParameterConfig = z.object({
|
||||
initial: z.number().default(512),
|
||||
sliderMin: z.number().default(64),
|
||||
sliderMax: z.number().default(1536),
|
||||
numberInputMin: z.number().default(64),
|
||||
numberInputMax: z.number().default(4096),
|
||||
fineStep: z.number().default(8),
|
||||
coarseStep: z.number().default(64),
|
||||
});
|
||||
|
||||
/**
|
||||
* Configuration options for the InvokeAI UI.
|
||||
* Distinct from system settings which may be changed inside the app.
|
||||
*/
|
||||
export type AppConfig = {
|
||||
export const zAppConfig = z.object({
|
||||
/**
|
||||
* Whether or not we should update image urls when image loading errors
|
||||
*/
|
||||
shouldUpdateImagesOnConnect: boolean;
|
||||
shouldFetchMetadataFromApi: boolean;
|
||||
shouldUpdateImagesOnConnect: z.boolean(),
|
||||
shouldFetchMetadataFromApi: z.boolean(),
|
||||
/**
|
||||
* Sets a size limit for outputs on the upscaling tab. This is a maximum dimension, so the actual max number of pixels
|
||||
* will be the square of this value.
|
||||
*/
|
||||
maxUpscaleDimension?: number;
|
||||
allowPrivateBoards: boolean;
|
||||
allowPrivateStylePresets: boolean;
|
||||
allowClientSideUpload: boolean;
|
||||
allowPublishWorkflows: boolean;
|
||||
allowPromptExpansion: boolean;
|
||||
disabledTabs: TabName[];
|
||||
disabledFeatures: AppFeature[];
|
||||
disabledSDFeatures: SDFeature[];
|
||||
nodesAllowlist: string[] | undefined;
|
||||
nodesDenylist: string[] | undefined;
|
||||
metadataFetchDebounce?: number;
|
||||
workflowFetchDebounce?: number;
|
||||
isLocal?: boolean;
|
||||
shouldShowCredits: boolean;
|
||||
sd: {
|
||||
defaultModel?: string;
|
||||
disabledControlNetModels: string[];
|
||||
disabledControlNetProcessors: FilterType[];
|
||||
maxUpscaleDimension: z.number().optional(),
|
||||
allowPrivateBoards: z.boolean(),
|
||||
allowPrivateStylePresets: z.boolean(),
|
||||
allowClientSideUpload: z.boolean(),
|
||||
allowPublishWorkflows: z.boolean(),
|
||||
allowPromptExpansion: z.boolean(),
|
||||
disabledTabs: z.array(zTabName),
|
||||
disabledFeatures: z.array(zAppFeature),
|
||||
disabledSDFeatures: z.array(zSDFeature),
|
||||
nodesAllowlist: z.array(z.string()).optional(),
|
||||
nodesDenylist: z.array(z.string()).optional(),
|
||||
metadataFetchDebounce: z.number().int().optional(),
|
||||
workflowFetchDebounce: z.number().int().optional(),
|
||||
isLocal: z.boolean().optional(),
|
||||
shouldShowCredits: z.boolean().optional(),
|
||||
sd: z.object({
|
||||
defaultModel: z.string().optional(),
|
||||
disabledControlNetModels: z.array(z.string()),
|
||||
disabledControlNetProcessors: z.array(zFilterType),
|
||||
// Core parameters
|
||||
iterations: NumericalParameterConfig;
|
||||
width: NumericalParameterConfig; // initial value comes from model
|
||||
height: NumericalParameterConfig; // initial value comes from model
|
||||
steps: NumericalParameterConfig;
|
||||
guidance: NumericalParameterConfig;
|
||||
cfgRescaleMultiplier: NumericalParameterConfig;
|
||||
img2imgStrength: NumericalParameterConfig;
|
||||
scheduler?: ParameterScheduler;
|
||||
vaePrecision?: ParameterPrecision;
|
||||
iterations: zNumericalParameterConfig,
|
||||
width: zNumericalParameterConfig,
|
||||
height: zNumericalParameterConfig,
|
||||
steps: zNumericalParameterConfig,
|
||||
guidance: zNumericalParameterConfig,
|
||||
cfgRescaleMultiplier: zNumericalParameterConfig,
|
||||
img2imgStrength: zNumericalParameterConfig,
|
||||
scheduler: zParameterScheduler.optional(),
|
||||
vaePrecision: zParameterPrecision.optional(),
|
||||
// Canvas
|
||||
boundingBoxHeight: NumericalParameterConfig; // initial value comes from model
|
||||
boundingBoxWidth: NumericalParameterConfig; // initial value comes from model
|
||||
scaledBoundingBoxHeight: NumericalParameterConfig; // initial value comes from model
|
||||
scaledBoundingBoxWidth: NumericalParameterConfig; // initial value comes from model
|
||||
canvasCoherenceStrength: NumericalParameterConfig;
|
||||
canvasCoherenceEdgeSize: NumericalParameterConfig;
|
||||
infillTileSize: NumericalParameterConfig;
|
||||
infillPatchmatchDownscaleSize: NumericalParameterConfig;
|
||||
boundingBoxHeight: zNumericalParameterConfig,
|
||||
boundingBoxWidth: zNumericalParameterConfig,
|
||||
scaledBoundingBoxHeight: zNumericalParameterConfig,
|
||||
scaledBoundingBoxWidth: zNumericalParameterConfig,
|
||||
canvasCoherenceStrength: zNumericalParameterConfig,
|
||||
canvasCoherenceEdgeSize: zNumericalParameterConfig,
|
||||
infillTileSize: zNumericalParameterConfig,
|
||||
infillPatchmatchDownscaleSize: zNumericalParameterConfig,
|
||||
// Misc advanced
|
||||
clipSkip: NumericalParameterConfig; // slider and input max are ignored for this, because the values depend on the model
|
||||
maskBlur: NumericalParameterConfig;
|
||||
hrfStrength: NumericalParameterConfig;
|
||||
dynamicPrompts: {
|
||||
maxPrompts: NumericalParameterConfig;
|
||||
};
|
||||
ca: {
|
||||
weight: NumericalParameterConfig;
|
||||
};
|
||||
};
|
||||
flux: {
|
||||
guidance: NumericalParameterConfig;
|
||||
};
|
||||
};
|
||||
clipSkip: zNumericalParameterConfig, // slider and input max are ignored for this, because the values depend on the model
|
||||
maskBlur: zNumericalParameterConfig,
|
||||
hrfStrength: zNumericalParameterConfig,
|
||||
dynamicPrompts: z.object({
|
||||
maxPrompts: zNumericalParameterConfig,
|
||||
}),
|
||||
ca: z.object({
|
||||
weight: zNumericalParameterConfig,
|
||||
}),
|
||||
}),
|
||||
flux: z.object({
|
||||
guidance: zNumericalParameterConfig,
|
||||
}),
|
||||
});
|
||||
|
||||
export type AppConfig = z.infer<typeof zAppConfig>;
|
||||
export type PartialAppConfig = PartialDeep<AppConfig>;
|
||||
|
||||
export const getDefaultAppConfig = (): AppConfig => ({
|
||||
isLocal: true,
|
||||
shouldUpdateImagesOnConnect: false,
|
||||
shouldFetchMetadataFromApi: false,
|
||||
allowPrivateBoards: false,
|
||||
allowPrivateStylePresets: false,
|
||||
allowClientSideUpload: false,
|
||||
allowPublishWorkflows: false,
|
||||
allowPromptExpansion: false,
|
||||
shouldShowCredits: false,
|
||||
disabledTabs: [],
|
||||
disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[],
|
||||
disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[],
|
||||
sd: {
|
||||
disabledControlNetModels: [],
|
||||
disabledControlNetProcessors: [],
|
||||
iterations: {
|
||||
initial: 1,
|
||||
sliderMin: 1,
|
||||
sliderMax: 1000,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 10000,
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
width: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
height: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
boundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
boundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
scaledBoundingBoxWidth: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
scaledBoundingBoxHeight: zNumericalParameterConfig.parse({}), // initial value comes from model
|
||||
scheduler: 'dpmpp_3m_k' as const,
|
||||
vaePrecision: 'fp32' as const,
|
||||
steps: {
|
||||
initial: 30,
|
||||
sliderMin: 1,
|
||||
sliderMax: 100,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 500,
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
guidance: {
|
||||
initial: 7,
|
||||
sliderMin: 1,
|
||||
sliderMax: 20,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 200,
|
||||
fineStep: 0.1,
|
||||
coarseStep: 0.5,
|
||||
},
|
||||
img2imgStrength: {
|
||||
initial: 0.7,
|
||||
sliderMin: 0,
|
||||
sliderMax: 1,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 1,
|
||||
fineStep: 0.01,
|
||||
coarseStep: 0.05,
|
||||
},
|
||||
canvasCoherenceStrength: {
|
||||
initial: 0.3,
|
||||
sliderMin: 0,
|
||||
sliderMax: 1,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 1,
|
||||
fineStep: 0.01,
|
||||
coarseStep: 0.05,
|
||||
},
|
||||
hrfStrength: {
|
||||
initial: 0.45,
|
||||
sliderMin: 0,
|
||||
sliderMax: 1,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 1,
|
||||
fineStep: 0.01,
|
||||
coarseStep: 0.05,
|
||||
},
|
||||
canvasCoherenceEdgeSize: {
|
||||
initial: 16,
|
||||
sliderMin: 0,
|
||||
sliderMax: 128,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 1024,
|
||||
fineStep: 8,
|
||||
coarseStep: 16,
|
||||
},
|
||||
cfgRescaleMultiplier: {
|
||||
initial: 0,
|
||||
sliderMin: 0,
|
||||
sliderMax: 0.99,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 0.99,
|
||||
fineStep: 0.05,
|
||||
coarseStep: 0.1,
|
||||
},
|
||||
clipSkip: {
|
||||
initial: 0,
|
||||
sliderMin: 0,
|
||||
sliderMax: 12, // determined by model selection, unused in practice
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 12, // determined by model selection, unused in practice
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
infillPatchmatchDownscaleSize: {
|
||||
initial: 1,
|
||||
sliderMin: 1,
|
||||
sliderMax: 10,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 10,
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
infillTileSize: {
|
||||
initial: 32,
|
||||
sliderMin: 16,
|
||||
sliderMax: 64,
|
||||
numberInputMin: 16,
|
||||
numberInputMax: 256,
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
maskBlur: {
|
||||
initial: 16,
|
||||
sliderMin: 0,
|
||||
sliderMax: 128,
|
||||
numberInputMin: 0,
|
||||
numberInputMax: 512,
|
||||
fineStep: 1,
|
||||
coarseStep: 1,
|
||||
},
|
||||
ca: {
|
||||
weight: {
|
||||
initial: 1,
|
||||
sliderMin: 0,
|
||||
sliderMax: 2,
|
||||
numberInputMin: -1,
|
||||
numberInputMax: 2,
|
||||
fineStep: 0.01,
|
||||
coarseStep: 0.05,
|
||||
},
|
||||
},
|
||||
dynamicPrompts: {
|
||||
maxPrompts: {
|
||||
initial: 100,
|
||||
sliderMin: 1,
|
||||
sliderMax: 1000,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 10000,
|
||||
fineStep: 1,
|
||||
coarseStep: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
flux: {
|
||||
guidance: {
|
||||
initial: 4,
|
||||
sliderMin: 2,
|
||||
sliderMax: 6,
|
||||
numberInputMin: 1,
|
||||
numberInputMax: 20,
|
||||
fineStep: 0.1,
|
||||
coarseStep: 0.5,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
@@ -67,6 +67,8 @@ export type Feature =
|
||||
| 'scale'
|
||||
| 'creativity'
|
||||
| 'structure'
|
||||
| 'tileSize'
|
||||
| 'tileOverlap'
|
||||
| 'optimizedDenoising'
|
||||
| 'fluxDevLicense';
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user