mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 17:57:54 -05:00)

Compare commits: 24 commits
| SHA1 |
|---|
| a47ef3ded9 |
| 7cb9b654f3 |
| 8819e12a86 |
| 967eb60ea9 |
| b1091ecda1 |
| 2723dd9051 |
| 8f050d992e |
| 0346095876 |
| f9bbc55f74 |
| 878a3907e9 |
| 4cfb41d9ae |
| 6ec64ecb3c |
| 540315edaa |
| cf10a1b736 |
| 9fb2a43780 |
| 1b743f7d9b |
| d7bf3f7d7b |
| eba31e7caf |
| bde456f9fa |
| 9ee83380e6 |
| 6982e6a469 |
| 0f4d71ed63 |
| 8f3f64b22e |
| dba0280790 |

.editorconfig (Normal file, 12 lines)

@@ -0,0 +1,12 @@
+# All files
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 2
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+# Python
+[*.py]
+indent_size = 4

.github/workflows/test-invoke-conda.yml (vendored, 6 changes)

@@ -106,10 +106,16 @@ jobs:
       run: |
         python scripts/configure_invokeai.py --no-interactive --yes

+    - name: cat ~/.invokeai
+      id: cat-invokeai
+      run: cat ~/.invokeai
+
     - name: Run the tests
       id: run-tests
       run: |
         time python scripts/invoke.py \
+          --no-patchmatch \
+          --no-nsfw_checker \
           --model ${{ matrix.stable-diffusion-model }} \
           --from_file ${{ env.TEST_PROMPTS }} \
           --root="${{ env.INVOKEAI_ROOT }}" \

.github/workflows/test-invoke-pip.yml (vendored, 6 changes)

@@ -105,10 +105,16 @@ jobs:
       run: |
         ${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py --no-interactive --yes

+    - name: cat ~/.invokeai
+      id: cat-invokeai
+      run: cat ~/.invokeai
+
     - name: Run the tests
       id: run-tests
       run: |
         time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
+          --no-patchmatch \
+          --no-nsfw_checker \
           --model ${{ matrix.stable-diffusion-model }} \
           --from_file ${{ env.TEST_PROMPTS }} \
           --root="${{ env.INVOKEAI_ROOT }}" \

@@ -5,6 +5,8 @@ SAMPLER_CHOICES = [
     "ddim",
     "k_dpm_2_a",
     "k_dpm_2",
+    "k_dpmpp_2_a",
+    "k_dpmpp_2",
     "k_euler_a",
     "k_euler",
     "k_heun",
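
The two `k_dpmpp_*` entries are the additions. As a rough sketch of how one of the new samplers would be exercised from the command line (the `--sampler` flag name is an assumption; consult `python scripts/invoke.py --help` for the exact switch):

```bash
# hypothetical invocation: select one of the newly added DPM++ samplers
python scripts/invoke.py --sampler k_dpmpp_2
```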

@@ -9,20 +9,21 @@ read -p "Press any key to continue, or CTRL-C to exit..."
 # make the installer zip for linux and mac
 rm -rf InvokeAI
 mkdir -p InvokeAI
-cp install.sh InvokeAI
+cp install.sh.in InvokeAI/install.sh
+chmod a+x InvokeAI/install.sh
 cp readme.txt InvokeAI

-zip -r InvokeAI-linux.zip InvokeAI
-zip -r InvokeAI-mac.zip InvokeAI
+zip -r InvokeAI-binary-linux.zip InvokeAI
+zip -r InvokeAI-binary-mac.zip InvokeAI

 # make the installer zip for windows
 rm -rf InvokeAI
 mkdir -p InvokeAI
-cp install.bat InvokeAI
+cp install.bat.in InvokeAI/install.bat
 cp readme.txt InvokeAI
 cp WinLongPathsEnabled.reg InvokeAI

-zip -r InvokeAI-windows.zip InvokeAI
+zip -r InvokeAI-binary-windows.zip InvokeAI

 rm -rf InvokeAI

@@ -22,9 +22,7 @@ set INSTALL_ENV_DIR=%cd%\installer_files\env
 @rem https://mamba.readthedocs.io/en/latest/installation.html
 set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
 set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
-#set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
-# RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
-RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
+set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
 set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

@@ -127,7 +125,7 @@ if %errorlevel% neq 0 goto err_exit
 echo ***** Updated pip and wheel *****

 set err_msg=----- requirements file copy failed -----
-copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
+copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
 if %errorlevel% neq 0 goto err_exit

 set err_msg=----- main pip install failed -----

@@ -140,11 +138,11 @@ set err_msg=----- InvokeAI setup failed -----
 .venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
 if %errorlevel% neq 0 goto err_exit

-copy installer\invoke.bat .\invoke.bat
+copy binary_installer\invoke.bat.in .\invoke.bat
 echo ***** Installed invoke launcher script ******

 @rem more cleanup
-rd /s /q installer installer_files
+rd /s /q binary_installer installer_files

 @rem preload the models
 call .venv\Scripts\python scripts\configure_invokeai.py

@@ -1,5 +1,9 @@
 #!/usr/bin/env bash

+# ensure we're in the correct folder in case user's CWD is somewhere else
+scriptdir=$(dirname "$0")
+cd "$scriptdir"
+
 set -euo pipefail
 IFS=$'\n\t'

@@ -22,6 +26,8 @@ function _err_exit {

 # This enables a user to install this project without manually installing git or Python

+echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
+
 export no_cache_dir="--no-cache-dir"
 if [ $# -ge 1 ]; then
     if [ "$1" = "use-cache" ]; then

@@ -29,10 +35,6 @@ if [ $# -ge 1 ]; then
     fi
 fi

-echo "$no_cache_dir"
-
-echo -e "\n***** Installing InvokeAI... *****\n"
-
 OS_NAME=$(uname -s)
 case "${OS_NAME}" in

@@ -80,19 +82,17 @@ if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
 fi

 # config
-echo "USING development BRANCH. REMEMBER TO CHANGE TO main BEFORE RELEASE"
 INSTALL_ENV_DIR="$(pwd)/installer_files/env"
 MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
 RELEASE_URL=https://github.com/invoke-ai/InvokeAI
-# RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 # RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
-RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
+RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
 if [ "$OS_NAME" == "darwin" ]; then
     PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
 elif [ "$OS_NAME" == "linux" ]; then
     PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
 fi
 echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"

 PACKAGES_TO_INSTALL=""

@@ -192,32 +192,33 @@ echo -e "We're running under"
 _err_exit $? _err_msg

 _err_msg="\n----- pip update failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location --upgrade pip wheel
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
 _err_exit $? _err_msg

-echo -e "\n***** Updated pip and wheel *****\n"
+echo -e "\n***** Updated pip *****\n"

 _err_msg="\n----- requirements file copy failed -----\n"
-cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
+cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
 _err_exit $? _err_msg

 _err_msg="\n----- main pip install failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -r requirements.txt
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
 _err_exit $? _err_msg

 echo -e "\n***** Installed Python dependencies *****\n"

 _err_msg="\n----- InvokeAI setup failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -e .
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
 _err_exit $? _err_msg

 echo -e "\n***** Installed InvokeAI *****\n"

-cp installer/invoke.sh .
+cp binary_installer/invoke.sh.in ./invoke.sh
+chmod a+x ./invoke.sh
 echo -e "\n***** Installed invoke launcher script ******\n"

 # more cleanup
-rm -rf installer/ installer_files/
+rm -rf binary_installer/ installer_files/

 # preload the models
 .venv/bin/python3 scripts/configure_invokeai.py

@@ -227,6 +228,8 @@ deactivate

 echo -e "\n***** Finished downloading models *****\n"

-echo "All done! Run the command './invoke.sh' to start InvokeAI."
+echo "All done! Run the command"
+echo "    \"$scriptdir/invoke.sh\""
+echo "to start InvokeAI."
 read -p "Press any key to exit..."
 exit

installer/invoke.sh → binary_installer/invoke.sh.in (Executable file → Normal file, 0 changes)

@@ -4,7 +4,7 @@
 #
 # pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-linux-x86_64-cuda-reqs.txt installer/requirements.in
 #
+--extra-index-url https://download.pytorch.org/whl/cu116
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
 --trusted-host https

 absl-py==1.3.0 \

@@ -4,6 +4,7 @@
 #
 # pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-windows-x86_64-cuda-reqs.txt installer/requirements.in
 #
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https

@@ -150,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -614,8 +619,8 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
     # via -r installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \

@@ -1009,6 +1014,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \

@@ -1660,6 +1666,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1863,6 +1870,7 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows"
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \

@@ -1876,6 +1884,10 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
 torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
     --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
     --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \

@@ -1925,6 +1937,9 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e

@@ -171,12 +171,12 @@ title: Changelog
 - Integrate sd-v1-5 model into test matrix (easily expandable), remove
   unnecessary caches by @mauwii in
   https://github.com/invoke-ai/InvokeAI/pull/1293
-- add --no-interactive to preload_models step by @mauwii in
+- add --no-interactive to configure_invokeai step by @mauwii in
   https://github.com/invoke-ai/InvokeAI/pull/1302
 - 1-click installer and updater. Uses micromamba to install git and conda into a
   contained environment (if necessary) before running the normal installation
   script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
-- preload_models.py script downloads the weight files by @lstein in
+- configure_invokeai.py script downloads the weight files by @lstein in
   https://github.com/invoke-ai/InvokeAI/pull/1290

 ## v2.0.1 <small>(13 October 2022)</small>

BIN docs/assets/canvas/biker_granny.png (Normal file; binary not shown; size after: 359 KiB)
BIN docs/assets/canvas/biker_jacket_granny.png (Normal file; binary not shown; size after: 528 KiB)
BIN docs/assets/canvas/mask_granny.png (Normal file; binary not shown; size after: 601 KiB)
BIN docs/assets/canvas/staging_area.png (Normal file; binary not shown; size after: 59 KiB)

docs/features/NSFW.md (Normal file, 89 lines)

@@ -0,0 +1,89 @@
---
title: The NSFW Checker
---

# :material-image-off: NSFW Checker

## The NSFW ("Safety") Checker

The Stable Diffusion image generation models will produce sexual imagery if deliberately prompted, and will occasionally produce such images when this is not intended. Such images are colloquially known as "Not Safe for Work" (NSFW). This behavior is due to the nature of the training set that Stable Diffusion was trained on, which culled millions of "aesthetic" images from the Internet.

You may not wish to be exposed to these images, and in some jurisdictions it may be illegal to publicly distribute such imagery, including mounting a publicly-available server that provides unfiltered images to the public. Furthermore, the [Stable Diffusion weights License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-ModelWeights.txt) forbids the model from being used to "exploit any of the vulnerabilities of a specific group of persons."

For these reasons Stable Diffusion offers a "safety checker," a machine learning model trained to recognize potentially disturbing imagery. When a potentially NSFW image is detected, the checker will blur the image and paste a warning icon on top. The checker can be turned on and off on the command line using `--nsfw_checker` and `--no-nsfw_checker`.
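
For example, a minimal sketch of launching the command-line client with the checker explicitly disabled (the script path is the one used throughout this documentation):

```bash
python scripts/invoke.py --no-nsfw_checker
```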

At installation time, InvokeAI will ask whether the checker should be activated by default (neither argument given on the command line). The response is stored in the InvokeAI initialization file (usually `.invokeai` in your home directory). You can change the default at any time by opening this file in a text editor and commenting or uncommenting the line `--nsfw_checker`.
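
For instance, the relevant portion of `~/.invokeai` might look like the sketch below; the `--outdir` line is illustrative only:

```bash
# comment out the next line to disable the NSFW checker by default
--nsfw_checker
--outdir="/home/lstein/invokeai/outputs"
```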

## Caveats

There are a number of caveats that you need to be aware of.

### Accuracy

The checker is [not perfect](https://arxiv.org/abs/2210.04610). It will occasionally flag innocuous images (false positives), and will frequently miss violent and gory imagery (false negatives). It rarely fails to flag sexual imagery, but this has been known to happen. For these reasons, the InvokeAI team prefers to refer to the software as an "NSFW Checker" rather than a "safety checker."

### Memory Usage and Performance

The NSFW checker consumes an additional 1.2G of GPU VRAM on top of the 3.4G of VRAM used by Stable Diffusion v1.5 (this is with half-precision arithmetic), roughly 4.6G in total. This means that the checker will not run successfully on GPU cards with less than 6GB VRAM, and will reduce the size of the images that you can produce.

The checker also introduces a slight performance penalty. Images will take ~1 second longer to generate when the checker is activated. Generally this is not noticeable.

### Intermediate Images in the Web UI

The checker only operates on the final image produced by the Stable Diffusion algorithm. If you are using the Web UI and have enabled the display of intermediate images, you will briefly be exposed to a low-resolution (mosaicized) version of the final image before it is flagged by the checker and replaced by a fully blurred version. You are encouraged to turn **off** intermediate image rendering when you are using the checker. Future versions of InvokeAI will apply additional blurring to intermediate images when the checker is active.

### Watermarking

InvokeAI does not apply any sort of watermark to images it generates. However, it does write metadata into the PNG data area, including the prompt used to generate the image and relevant parameter settings. These fields can be examined using the `sd-metadata.py` script that comes with the InvokeAI package.
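
A typical invocation is sketched below; the image filename is hypothetical, and the script is assumed to take one or more PNG paths as arguments:

```bash
python scripts/sd-metadata.py outputs/000001.4242424242.png
```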

Note that several other Stable Diffusion distributions offer wavelet-based "invisible" watermarking. We have experimented with the library used to generate these watermarks and have reached the conclusion that while the watermarking library may be adding watermarks to PNG images, the currently available version is unable to retrieve them successfully. If and when a functioning version of the library becomes available, we will offer this feature as well.

@@ -120,7 +120,7 @@ A number of caveats:
    (`--iterations`) argument.

 3. Your results will be _much_ better if you use the `inpaint-1.5` model
-   released by runwayML and installed by default by `scripts/preload_models.py`.
+   released by runwayML and installed by default by `scripts/configure_invokeai.py`.
    This model was trained specifically to harmoniously fill in image gaps. The
    standard model will work as well, but you may notice color discontinuities at
    the border.

@@ -28,11 +28,11 @@ should "just work" without further intervention. Simply pass the `--upscale`
 the popup in the Web GUI.

 **GFPGAN** requires a series of downloadable model files to work. These are
-loaded when you run `scripts/preload_models.py`. If GFPGAN is failing with an
+loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
 error, please run the following from the InvokeAI directory:

 ```bash
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
 ```

 If you do not run this script in advance, the GFPGAN module will attempt to

@@ -110,7 +110,7 @@ This repo also allows you to perform face restoration using
 [CodeFormer](https://github.com/sczhou/CodeFormer).

 In order to set up CodeFormer to work, you need to download the models like with
-GFPGAN. You can do this either by running `preload_models.py` or by manually
+GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
 downloading the
 [model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
 and saving it to the `ldm/invoke/restoration/codeformer/weights` folder.

docs/features/UNIFIED_CANVAS.md (Normal file, 117 lines)

@@ -0,0 +1,117 @@
The Unified Canvas is a tool designed to streamline and simplify the process of composing an image using Stable Diffusion. It offers artists all of the available Stable Diffusion generation modes (Text To Image, Image To Image, Inpainting, and Outpainting) as a single unified workflow. The flexibility of the tool allows you to tweak and edit image generations, extend images beyond their initial size, and to create new content in a freeform way both inside and outside of existing images.

This document explains the basics of using the Unified Canvas, introducing you to its features and tools one by one. It also describes some of the more advanced tools available to power users of the Canvas.

# Basics
The Unified Canvas consists of two layers: the **Base Layer** and the **Mask Layer**. You can swap from one layer to the other by selecting the layer you want in the drop-down menu on the top left corner of the Unified Canvas, or by pressing the (Q) hotkey.

### Base Layer
The **Base Layer** is the image content currently managed by the Canvas, and can be exported at any time to the gallery by using the **Save to Gallery** option. When the Base Layer is selected, the Brush (B) and Eraser (E) tools will directly manipulate the base layer. Any images uploaded to the Canvas, or sent to the Unified Canvas from the gallery, will clear out all existing content and set the Base layer to the new image.

### Staging Area
When you generate images, they will display in the Canvas's **Staging Area**, alongside the Staging Area toolbar buttons. While the Staging Area is active, you cannot interact with the Canvas itself.

<figure markdown>
</figure>

Accepting generations will commit the new generation to the **Base Layer**. You can review all generated images using the Prev/Next arrows, save any individual generations to your gallery (without committing to the Base layer) or discard generations. While you can Undo a discard in an individual Canvas session, any generations that are not saved will be lost when the Canvas resets.

### Mask Layer
The **Mask Layer** consists of any masked sections that have been created to inform Inpainting generations. You can paint a new mask, or edit an existing mask, using the Brush tool and the Eraser with the Mask layer set as your Active layer. Any masked areas will only affect generation inside of the current bounding box.

### Bounding Box
When generating a new image, Invoke will process and apply new images within the area denoted by the **Bounding Box**. The Width & Height settings of the Bounding Box, as well as its location within the Unified Canvas and pixels or empty space that it encloses, determine how new invocations are generated - see [Inpainting & Outpainting](#inpainting-and-outpainting) below. The Bounding Box can be moved and resized using the Move (V) tool. It can also be resized using the Bounding Box options in the Options Panel. By using these controls you can generate larger or smaller images, control which sections of the image are being processed, as well as control Bounding Box tools like the Bounding Box fill/erase.

### <a name="inpainting-and-outpainting"></a> Inpainting & Outpainting
"Inpainting" means asking the AI to refine part of an image while leaving the rest alone. For example, updating a portrait of your grandmother to have her wear a biker's jacket.

<figure markdown>
</figure>

<figure markdown>
</figure>

"Outpainting" means asking the AI to expand the original image beyond its original borders, making a bigger image that's still based on the original. For example, extending the above image of your Grandmother in a biker's jacket to include her wearing jeans (and while we're at it, a motorcycle!)

<figure markdown>
</figure>

When you are using the Unified Canvas, Invoke decides automatically whether to do Inpainting, Outpainting, ImageToImage, or TextToImage by looking inside the area enclosed by the Bounding Box. It chooses the appropriate type of generation based on whether the Bounding Box contains empty (transparent) areas on the Base layer, or whether it contains colored areas from previous generations (or from painted brushstrokes) on the Base layer, and/or whether the Mask layer contains any brushstrokes. See [Generation Methods](#generation-methods) below for more information.

# Getting Started

To get started with the Unified Canvas, you will want to generate a new base layer using Txt2Img or importing an initial image. We'll refer to either of these methods as the "initial image" in the below guide.

From there, you can consider the following techniques to augment your image:
* **New Images**: Move the bounding box to an empty area of the Canvas, type in your prompt, and Invoke, to generate a new image using the Text to Image function.
* **Image Correction**: Use the color picker and brush tool to paint corrections on the image, switch to the Mask layer, and brush a mask over your painted area to use **Inpainting**. You can also use the **ImageToImage** generation method to invoke new interpretations of the image.
* **Image Expansion**: Move the bounding box to include a portion of your initial image, and a portion of transparent/empty pixels, then Invoke using a prompt that describes what you'd like to see in that area. This will Outpaint the image. You'll typically find more coherent results if you keep about 50-60% of the original image in the bounding box. Make sure that the Image To Image Strength slider is set to a high value - you may need to set it higher than you are used to.
* **New Content on Existing Images**: If you want to add new details or objects into your image, use the brush tool to paint a sketch of what you'd like to see on the image, switch to the Mask layer, and brush a mask over your painted area to use **Inpainting**. If the masked area is small, consider using a smaller bounding box to take advantage of Invoke's automatic Scaling features, which can help to produce better details.
* **And more**: There are a number of creative ways to use the Canvas, and the above are just starting points. We're excited to see what you come up with!

# <a name="generation-methods"></a> Generation Methods
The Canvas can use all generation methods available (Txt2Img, Img2Img, Inpainting, and Outpainting), and these will be automatically selected and used based on the current selection area within the Bounding Box.

## Text to Image
If the Bounding Box is placed over an area of Canvas with an **empty Base Layer**, invoking a new image will use **TextToImage**. This generates an entirely new image based on your prompt.

## Image to Image
If the Bounding Box is placed over an area of Canvas with an **existing Base Layer area with no transparent pixels or masks**, invoking a new image will use **ImageToImage**. This uses the image within the bounding box and your prompt to interpret a new image. The image will be closer to your original image at lower Image to Image strengths.

## Inpainting
If the Bounding Box is placed over an area of Canvas with an **existing Base Layer and any pixels selected using the Mask layer**, invoking a new image will use **Inpainting**. Inpainting uses the existing colors/forms in the masked area in order to generate a new image for the masked area only. The unmasked portion of the image will remain the same. Image to Image strength applies to the inpainted area.

If you desire something completely different from the original image in your new generation (i.e., if you want Invoke to ignore existing colors/forms), consider toggling the Inpaint Replace setting on, and use high values for both Inpaint Replace and Image To Image Strength.

> Note: By default, the **Scale Before Processing** option (which inpaints more coherent details by generating at a larger resolution and then scaling) is only activated when the Bounding Box is relatively small. To get the best inpainting results you should therefore resize your Bounding Box to the smallest area that contains your mask and enough surrounding detail to help Stable Diffusion understand the context of what you want it to draw. You should also update your prompt so that it describes *just* the area within the Bounding Box.

## Outpainting
If the Bounding Box is placed over an area of Canvas partially filled by an existing Base Layer area and partially by transparent pixels or masks, invoking a new image will use **Outpainting**, as well as **Inpainting** any masked areas.

____

# Advanced Features

Features with non-obvious behavior are detailed below, in order to provide clarity on the intent and common use cases we expect for utilizing them.

## Toolbar

### Mask Options
* **Enable Mask** - This flag can be used to Enable or Disable the currently painted mask. If you have painted a mask that you don't want to affect the next invocation, but you *also* don't want to delete it, then you can set this option to Disable. When you want the mask back, set this back to Enable.
* **Preserve Masked Area** - When enabled, Preserve Masked Area inverts the effect of the Mask on the Inpainting process. Pixels in masked areas will be kept unchanged, and unmasked areas will be regenerated.

### Creative Tools
* **Brush - Base/Mask Modes** - The Brush tool switches automatically between different modes of operation for the Base and Mask layers respectively.
    * On the Base layer, the brush will directly paint on the Canvas using the color selected on the Brush Options menu.
    * On the Mask layer, the brush will create a new mask. If you're finding the mask difficult to see over the existing content of the Unified Canvas, you can change the color it is drawn with using the color selector on the Mask Options dropdown.
* **Erase Bounding Box** - On the Base layer, erases all pixels within the Bounding Box.
* **Fill Bounding Box** - On the Base layer, fills all pixels within the Bounding Box with the currently selected color.

### Canvas Tools
* **Move Tool** - Allows for manipulation of the Canvas view (by dragging on the Canvas, outside the bounding box), the Bounding Box (by dragging the edges of the box), or the Width/Height of the Bounding Box (by dragging one of the 9 directional handles).
* **Reset View** - Click to re-orient the view to the center of the Bounding Box.
* **Merge Visible** - If your browser is having performance problems drawing the image in the Unified Canvas, click this to consolidate all of the information currently being rendered by your browser into a merged copy of the image. This lowers the resource requirements and should improve performance.

## Seam Correction
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated by Stable Diffusion into your existing image. To do this, the area around the `seam` at the boundary between your image and the new generation is automatically blended to produce a seamless output. In a fully automatic process, a mask is generated to cover the seam, and then the area of the seam is Inpainted.

Although the default options should work well most of the time, sometimes it can help to alter the parameters that control the seam Inpainting. A wider seam and a blur setting of about 1/3 of the seam have been noted as producing consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with both sides). Seam strength of 0.7 is best for reducing hard seams.
* **Seam Size** - The size of the seam masked area. Set higher to make a larger mask around the seam.
* **Seam Blur** - The size of the blur that is applied on *each* side of the masked area.
* **Seam Strength** - The Image To Image Strength parameter used for the Inpainting generation that is applied to the seam area.
* **Seam Steps** - The number of generation steps that should be used to Inpaint the seam.

## Infill & Scaling
* **Scale Before Processing & W/H**: When generating images with a bounding box smaller than the optimized W/H of the model (e.g., 512x512 for SD1.5), this feature first generates at a larger size with the same aspect ratio, and then scales that image down to fill the selected area. This is particularly useful when inpainting very small details. Scaling is optional but is enabled by default.
* **Inpaint Replace**: When Inpainting, the default method is to utilize the existing RGB values of the Base layer to inform the generation process. If Inpaint Replace is enabled, noise is generated and blended with the existing pixels (completely replacing the original RGB values at an Inpaint Replace value of 1). This can help generate more variation from the pixels on the Base layers.
    * > When using Inpaint Replace you should use a higher Image To Image Strength value, especially at higher Inpaint Replace values
* **Infill Method**: Invoke currently supports two methods for producing RGB values for use in the Outpainting process: Patchmatch and Tile. We believe that Patchmatch is the superior method, however we provide support for Tile in case Patchmatch cannot be installed or is unavailable on your computer.
* **Tile Size**: The Tile method for Outpainting sources small portions of the original image and randomly places these into the areas being Outpainted. This value sets the size of those tiles.

# Hot Keys
The Unified Canvas is a tool that excels when you use hotkeys. You can view the full list of keyboard shortcuts, updated with all new features, by clicking the Keyboard Shortcuts icon at the top right of the InvokeAI WebUI.

@@ -86,6 +86,10 @@ AMD card (using the ROCm driver). For full installation and upgrade
 instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

+Linux users who wish to make use of the PyPatchMatch inpainting
+functions will need to perform a bit of extra work to enable this
+module. Instructions can be found at [Installing PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
+
 ## :fontawesome-solid-computer: Hardware Requirements

 ### :octicons-cpu-24: System

@@ -123,7 +127,8 @@ You will need one of the following:
 - [The InvokeAI Web Interface](features/WEB.md)
 - [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
-<!-- this link does not exist - [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md) -->
+- [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
 <!-- separator -->
 - [The Command Line Interface](features/CLI.md)
 - [Image2Image](features/IMG2IMG.md)
 - [Inpainting](features/INPAINTING.md)

@@ -136,6 +141,7 @@ You will need one of the following:
 - [Prompt Engineering](features/PROMPTS.md)
 <!-- separator -->
 - Miscellaneous
+  - [NSFW Checker](features/NSFW.md)
   - [Embiggen upscaling](features/EMBIGGEN.md)
   - [Other](features/OTHER.md)

@@ -160,7 +166,7 @@ You will need one of the following:
 - You can now load
   [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
   without leaving the CLI.
-- The installation process (via `scripts/preload_models.py`) now lets you select
+- The installation process (via `scripts/configure_invokeai.py`) now lets you select
   among several popular
   [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
   and downloads and installs them on your behalf. Among other models, this

docs/installation/BUILDING_BINARY_INSTALLERS.md (Normal file, 85 lines)

@@ -0,0 +1,85 @@
# How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)

## 1. Ensure `installers/requirements.in` is correct and up to date on the branch to be installed.

## <a name="step-2"></a> 2. Run `pip-compile` on each platform.

On each target platform, in the branch that is to be installed, and inside the InvokeAI git root folder, run the following commands:

```commandline
conda activate invokeai # or however you activate python
pip install pip-tools
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
```

where `<reqsfile>.txt` is whichever of

```commandline
py3.10-darwin-arm64-mps-reqs.txt
py3.10-darwin-x86_64-reqs.txt
py3.10-linux-x86_64-cuda-reqs.txt
py3.10-windows-x86_64-cuda-reqs.txt
```

matches the current OS and architecture.

> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.

## <a name="step-3"></a> 3. Set github repository and branch

Once all reqs files have been collected and committed **to the branch to be installed**, edit `binary_installer/install.sh.in` and `binary_installer/install.bat.in` so that `RELEASE_URL` and `RELEASE_SOURCEBALL` point to the github repo and branch that is to be installed.

For example, to install the `main` branch of `InvokeAI`, they should be set as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

Or, to install the `damians-cool-feature` branch of `damian0815`, set them as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/damian0815/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/damian0815/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

The branch and repo specified here **must** contain the correct reqs files. The installer zip files **do not** contain requirements files; they are pulled from the specified branch during the installation process.

## 4. Create zip files.

cd into the `installers/` folder and run `./create_installers.sh`. This will create `InvokeAI-mac_on_<branch>.zip`, `InvokeAI-windows_on_<branch>.zip` and `InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.
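
A minimal sketch of this step, assuming your working directory is the repository root:

```commandline
cd installers
./create_installers.sh
```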

These zips will continue to function as installers for all future pushes to those branches, as long as necessary changes to `requirements.in` are propagated in a timely manner to the `py3.10-*-reqs.txt` files using pip-compile as outlined in [step 2](#step-2).

To actually install, users should unzip the appropriate zip file into an empty folder and run `install.sh` on macOS/Linux or `install.bat` on Windows.

@@ -56,7 +56,7 @@ unofficial Stable Diffusion models and where they can be obtained.
 There are three ways to install weights files:

-1. During InvokeAI installation, the `preload_models.py` script can download
+1. During InvokeAI installation, the `configure_invokeai.py` script can download
    them for you.

 2. You can use the command-line interface (CLI) to import, configure and modify

@@ -65,13 +65,13 @@ There are three ways to install weights files:
 3. You can download the files manually and add the appropriate entries to
    `models.yaml`.

-### Installation via `preload_models.py`
+### Installation via `configure_invokeai.py`

-This is the most automatic way. Run `scripts/preload_models.py` from the
+This is the most automatic way. Run `scripts/configure_invokeai.py` from the
 console. It will ask you to select which models to download and lead you through
 the steps of setting up a Hugging Face account if you haven't done so already.

-To start, run `python scripts/preload_models.py` from within the InvokeAI
+To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
 directory:

 !!! example ""

@@ -238,7 +238,7 @@ arabian-nights-1.0:
 | arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
 | description | Any description that you want to add to the model to remind you what it is. |
 | weights | Relative path to the .ckpt weights file for this model. |
-| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `preload_models.py` script. |
+| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
 | vae | If you want to add a VAE file to the model, then enter its path here. |
 | width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |

@@ -1,8 +1,8 @@
 ---
-title: InvokeAI Installer
+title: InvokeAI Binary Installer
 ---

-The InvokeAI installer is a shell script that will install InvokeAI onto a stock
+The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
 computer running recent versions of Linux, MacOSX or Windows. It will leave you
 with a version that runs a stable version of InvokeAI. When a new version of
 InvokeAI is released, you will download and reinstall the new version.

@@ -35,8 +35,8 @@ recommended model weights files.
 ## Steps to Install

 1. Download the
-   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
-   InvokeAI's installer for your platform
+   [latest release](https://github.com/invoke-ai/InvokeAI/releases/tag/2.2.0-rc4) of
+   InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`

 2. Place the downloaded package someplace where you have plenty of HDD space,
    and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)

@@ -2,7 +2,7 @@
 title: Running InvokeAI on Google Colab using a Jupyter Notebook
 ---

-# THIS NEEDS TO BE FLESHED OUT
+# THIS DOCUMENTATION IS UNFINISHED - VOLUNTEERS GRATEFULLY ACCEPTED

 ## Introduction

@@ -22,6 +22,4 @@ start running the cells one-by-one.

 ### Updating the stable version

-### Updating to the development version
-
 ## Troubleshooting

@@ -155,10 +155,10 @@ command-line completion.
 process for this is described [here](INSTALLING_MODELS.md).

 ```bash
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
 ```

-The script `preload_models.py` will interactively guide you through the
+The script `configure_invokeai.py` will interactively guide you through the
 process of downloading and installing the weights files needed for InvokeAI.
 Note that the main Stable Diffusion weights file is protected by a license
 agreement that you have to agree to. The script will list the steps you need

@@ -220,7 +220,7 @@ greatest version, launch the Anaconda window, enter `InvokeAI` and type:
 ```bash
 git pull
 conda env update
-python scripts/preload_models.py --no-interactive #optional
+python scripts/configure_invokeai.py --no-interactive #optional
 ```

 This will bring your local copy into sync with the remote one. The last step may

@@ -359,7 +359,7 @@ brew install llvm
 If brew config has Clang installed, update to the latest llvm and try creating the environment again.

-#### `preload_models.py` or `invoke.py` crashes at an early stage
+#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

 This is usually due to an incomplete or corrupted Conda install. Make sure you
 have linked to the correct environment file and run `conda update` again.

docs/installation/INSTALL_PATCHMATCH.md (Normal file, 86 lines)

@@ -0,0 +1,86 @@
---
title: Installing PyPatchMatch
---

# :octicons-paintbrush-16: Installing PyPatchMatch

pypatchmatch is a Python module for inpainting images. It is not needed to run InvokeAI, but it greatly improves the quality of inpainting and outpainting and is recommended.

Unfortunately, it is a C++ optimized module and installation can be somewhat challenging. This guide leads you through the steps.

## Windows

You're in luck! On Windows platforms PyPatchMatch will install automatically with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on it.

## Linux

Prior to installing PyPatchMatch, you need to take the following steps:

1. Install the `build-essential` tools:

    ```
    sudo apt update
    sudo apt install build-essential
    ```

2. Install `opencv`:

    ```
    sudo apt install python3-opencv libopencv-dev
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/x86_64-linux-gnu/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

4. Activate the environment you use for invokeai, either with `conda` or with a virtual environment.

5. Do a "develop" install of pypatchmatch:

    ```
    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
    ```

6. Confirm that pypatchmatch is installed. At the command-line prompt enter `python`, and then at the `>>>` line type `from patchmatch import patch_match`. It should look like the following:

    ```
    Python 3.9.5 (default, Nov 23 2021, 15:27:38)
    [GCC 9.3.0] on linux
    Type "help", "copyright", "credits" or "license" for more information.
    >>> from patchmatch import patch_match
    Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
    rm -rf build/obj libpatchmatch.so
    mkdir: created directory 'build/obj'
    mkdir: created directory 'build/obj/csrc/'
    [dep] csrc/masked_image.cpp ...
    [dep] csrc/nnf.cpp ...
    [dep] csrc/inpaint.cpp ...
    [dep] csrc/pyinterface.cpp ...
    [CC] csrc/pyinterface.cpp ...
    [CC] csrc/inpaint.cpp ...
    [CC] csrc/nnf.cpp ...
    [CC] csrc/masked_image.cpp ...
    [link] libpatchmatch.so ...
    ```

If you see no errors, then you're ready to go!
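
As a shortcut, the same import check can be run non-interactively:

```
python -c "from patchmatch import patch_match"
```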

@@ -27,7 +27,7 @@ Though there are multiple steps, there really is only one click involved to kick
 off the process.

 1. The source installer is distributed in ZIP files. Go to the
-   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
+   [latest release](https://github.com/invoke-ai/InvokeAI/releases/tag/2.2.0-rc4), and
    look for a series of files named:

    - invokeAI-src-installer-mac.zip

@@ -55,7 +55,7 @@ off the process.
    named `install.bat` on Windows systems and `install.sh` on Linux and
    Macintosh systems.

-4. Alternatively, form the command line, run the shell script or .bat file:
+4. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd invokeAI

@@ -66,8 +66,17 @@ off the process.
    requirements including Conda, Git and Python, then download the current
    InvokeAI code and install it along with its dependencies.

+   Be aware that some of the library download and install steps take a long time.
+   In particular, the `pytorch` package is quite large and often appears to get
+   "stuck" at 99.9%. Similarly, the `pip installing requirements` step may
+   appear to hang. Have patience and the installation step will eventually
+   resume. However, there are occasions when the library install does
+   legitimately get stuck. If you have been waiting for more than ten minutes
+   and nothing is happening, you can interrupt the script with ^C. You may restart
+   it and it will pick up where it left off.
+
 6. After installation completes, the installer will launch a script called
-   `preload_models.py`, which will guide you through the first-time process of
+   `configure_invokeai.py`, which will guide you through the first-time process of
    selecting one or more Stable Diffusion model weights files, downloading and
    configuring them.

@@ -110,6 +119,71 @@ python scripts/invoke.py --web --max_load_models=3 \
 These options are described in detail in the
 [Command-Line Interface](../features/CLI.md) documentation.

+## Troubleshooting
+
+_Package dependency conflicts_ If you have previously installed
+InvokeAI or another Stable Diffusion package, the installer may
+occasionally pick up outdated libraries and either the installer or
+`invoke` will fail with complaints about library conflicts. There are
+two steps you can take to clear this problem. Both of these are done
+from within the "developer's console", which you can get to by
+launching `invoke.sh` (or `invoke.bat`) and selecting launch option #3:
+
+1. Remove the previous `invokeai` environment completely. From within
+   the developer's console, give the command `conda env remove -n invokeai`.
+   This will delete previous files installed by `invoke`.
+
+   Then exit from the developer's console and launch the script
+   `update.sh` (or `update.bat`). This will download the most recent
+   InvokeAI (including bug fixes) and reinstall the environment.
+   You should then be able to run `invoke.sh`/`invoke.bat`.
+
+2. If this doesn't work, you can try cleaning your system's conda
+   cache. This is slightly more extreme, but won't interfere with
+   any other python-based programs installed on your computer.
+   From the developer's console, run the command `conda clean -a`
+   and answer "yes" to all prompts.
+
+   After this is done, run `update.sh` and try again as before.
+
+_"Corrupted configuration file."_ Everything seems to install ok, but
+`invoke` complains of a corrupted configuration file and calls
+`configure_invokeai.py` to fix it, but this doesn't fix the problem.
+
+This issue is often caused by a misconfigured configuration directive
+in the `.invokeai` initialization file that contains startup settings.
+This can be corrected by fixing the offending line.
+
+First find `.invokeai`. It is a small text file located in your home
+directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
+name*\.invokeai` on Windows systems. Open it with a text editor
+(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
+and look for the lines starting with `--root` and `--outdir`.
+
+An example is here:
+
+```cmd
+--root="/home/lstein/invokeai"
+--outdir="/home/lstein/invokeai/outputs"
+```
+
+There should not be whitespace before or after the directory paths,
+and the paths should not end with slashes:
+
+```cmd
+--root="/home/lstein/invokeai " # wrong! no whitespace here
+--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
+```
+
+Fix the problem with your text editor and save as a **plain text**
+file. This should clear the issue.
+
+_If none of these maneuvers fixes the problem_ then please report the
+problem to the [InvokeAI
+Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
+visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.
+
 ## Updating to newer versions

 This section describes how to update InvokeAI to new versions of the software.

@@ -119,31 +193,15 @@ This section describes how to update InvokeAI to new versions of the software.
 This distribution is changing rapidly, and we add new features on a daily basis.
 To update to the latest released version (recommended), run the `update.sh`
 (Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
-release and re-run the `preload_models` script to download any updated models
+release and re-run the `configure_invokeai` script to download any updated models
 files that may be needed. You can also use this to add additional models that
 you did not select at installation time.

-### Updating to the development version
-
-There may be times that there is a feature in the `development` branch of
-InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
-corrects an annoying bug. To do this, you will use the developer's console.
-
-From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
-`invoke.bat` (Windows) and select option (3) to open the developer's console.
-Then run the following commands to get the `development` branch:
-
-```bash
-git checkout development
-git pull
-conda env update
-```
-
 You can now close the developer console and run `invoke` as before. If you get
 complaints about missing models, then you may need to do the additional step of
-running `preload_models.py`. This happens relatively infrequently. To do this,
+running `configure_invokeai.py`. This happens relatively infrequently. To do this,
 simply open up the developer's console again and type
-`python scripts/preload_models.py`.
+`python scripts/configure_invokeai.py`.

 ## Troubleshooting
@@ -5,12 +5,17 @@ title: Overview

 We offer several ways to install InvokeAI, each one suited to your
 experience and preferences.

-1. [InvokeAI installer](INSTALL_INVOKE.md)
+1. [InvokeAI binary installer](INSTALL_INVOKE.md)

    This is an installer script that installs InvokeAI and all the
-   third party libraries it depends on. When a new version of
-   InvokeAI is released, you will download and reinstall the new
-   version.
+   third party libraries it depends on. It includes access to a
+   "developer console" which will help us debug problems with you and
+   give you access to experimental features.
+
+   When a new InvokeAI release is available, you will run an `update`
+   script to download and install the new version. Intermediate versions
+   that contain experimental and possibly unstable features will not be
+   available.

    This installer is designed for people who want the system to "just
    work", don't have an interest in tinkering with it, and do not

@@ -19,23 +24,32 @@ experience and preferences.

    **Important Caveats**
    - This script does not support AMD GPUs. For Linux AMD support,
      please use the manual or source code installer methods.
    - This script has difficulty on some Macintosh machines
      that have previously been used for Python development due to
      conflicting development tools versions. Mac developers may wish
      to try the source code installer or one of the manual methods instead.
    - The tab autocomplete feature of the command-line client,
      which completes commonly used filenames and commands, will
      not work in this version. All Web UI functions are fully
      operational, however.

-2. [Source code installer](INSTALL_SOURCE.md)
+2. [InvokeAI source code installer](INSTALL_SOURCE.md)

-   This is a script that will install InvokeAI and all its essential
-   third party libraries. In contrast to the previous installer, it
-   includes access to a "developer console" which will allow you to
-   access experimental features on the development branch.
+   This is a script that will install Python, the Anaconda ("conda")
+   package manager, all of InvokeAI's essential third party
+   libraries and InvokeAI itself. It includes access to a "developer
+   console" which will help us debug problems with you and give you
+   access to experimental features.

-   This method is recommended for individuals who wish to stay
-   on the cutting edge of InvokeAI development and are not afraid
-   of occasional breakage.
+   When a new InvokeAI feature is available, even between releases,
+   you will be able to upgrade and try it out by running an `update`
+   script. This method is recommended for individuals who wish to
+   stay on the cutting edge of InvokeAI development and are not
+   afraid of occasional breakage.

-3. [Manual Installation](INSTALL_MANUAL.md)
+   **Important Caveats**
+   - This script is a bit cranky and occasionally hangs or times out,
+     forcing you to cancel and restart the script (it will pick up where
+     it left off). It also takes noticeably longer to run than the
+     binary installer.
+
+2. [Manual Installation](INSTALL_MANUAL.md)

    In this method you will manually run the commands needed to install
    InvokeAI and its dependencies. We offer two recipes: one suited to

@@ -47,14 +61,14 @@ experience and preferences.

    the cutting edge of future InvokeAI development and is willing to put
    up with occasional glitches and breakage.

-4. [Docker Installation](INSTALL_DOCKER.md)
+3. [Docker Installation](INSTALL_DOCKER.md)

    We also offer a method for creating Docker containers containing
    InvokeAI and its dependencies. This method is recommended for
    individuals who have experience with Docker containers and understand
    the pluses and minuses of a container-based install.

-5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
+4. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

    This method is suitable for running InvokeAI on a Google Colab
    account. It is recommended for individuals who have previously
@@ -69,7 +69,7 @@ title: Manual Installation, Linux

    machine-learning models:

    ```bash
-   (invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
+   (invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
    ```

    !!! note

@@ -111,7 +111,7 @@ will do our best to help.

 !!! todo "Download the model weight files"

-    The `preload_models.py` script downloads and installs the model weight
+    The `configure_invokeai.py` script downloads and installs the model weight
     files for you. It will lead you through the process of getting a Hugging Face
     account, accepting the Stable Diffusion model weight license agreement, and
     creating a download token:

@@ -119,7 +119,7 @@ will do our best to help.

    ```bash
    # This will take some time, depending on the speed of your internet connection
    # and will consume about 10GB of space
-   python scripts/preload_models.py
+   python scripts/configure_invokeai.py
    ```

 !!! todo "Run InvokeAI!"

@@ -220,8 +220,8 @@ There are several causes of these errors:

    with "(invokeai)" then you activated it. If it begins with "(base)" or
    something else you haven't.

-2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
-   instead of `python ./scripts/preload_models.py` or
+2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
+   instead of `python ./scripts/configure_invokeai.py` or
    `python ./scripts/invoke.py`. The cause of this error is long so it's below.

 <!-- I could not find out where the error is, otherwise would have marked it as a footnote -->

@@ -359,7 +359,7 @@ python ./scripts/txt2img.py \

 ### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'

 ```bash
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
 ```

 ---

@@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand

 7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

    ```bash
-   python scripts/preload_models.py
+   python scripts/configure_invokeai.py
    ```

    !!! note

@@ -1 +0,0 @@
-environments-and-requirements/environment-mac.yml
623  frontend/dist/assets/index.2b7cd976.js  (vendored)
File diff suppressed because one or more lines are too long

623  frontend/dist/assets/index.637f12bd.js  (vendored, new file)
File diff suppressed because one or more lines are too long

501  frontend/dist/assets/index.678a45f6.js  (vendored)
File diff suppressed because one or more lines are too long

1  frontend/dist/assets/index.c609c0c8.css  (vendored, new file)
File diff suppressed because one or more lines are too long

1  frontend/dist/assets/index.f999e69e.css  (vendored)
File diff suppressed because one or more lines are too long

4  frontend/dist/index.html  (vendored)
@@ -6,8 +6,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index.2b7cd976.js"></script>
-    <link rel="stylesheet" href="./assets/index.f999e69e.css">
+    <script type="module" crossorigin src="./assets/index.637f12bd.js"></script>
+    <link rel="stylesheet" href="./assets/index.c609c0c8.css">
   </head>

   <body>
@@ -9,6 +9,8 @@ export const SAMPLERS: Array<string> = [
   'k_lms',
   'k_dpm_2',
   'k_dpm_2_a',
+  'k_dpmpp_2',
+  'k_dpmpp_2_a',
   'k_euler',
   'k_euler_a',
   'k_heun',
2  frontend/src/app/invokeai.d.ts  (vendored)

@@ -46,6 +46,8 @@ export declare type CommonGeneratedImageMetadata = {
   | 'ddim'
   | 'k_dpm_2_a'
   | 'k_dpm_2'
+  | 'k_dpmpp_2_a'
+  | 'k_dpmpp_2'
   | 'k_euler_a'
   | 'k_euler'
   | 'k_heun'
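A side note on the two hunks above: the sampler names are maintained twice, once as a runtime string array and once as a type-level union. The commit simply edits both by hand, but one common way to keep such pairs from drifting apart is to derive the union from a `const` array. A sketch, under the assumption that the array could be declared `as const` (this is illustrative, not what the code does):

```typescript
// Hypothetical consolidation of the array and the union type.
export const SAMPLERS = [
  'ddim',
  'k_lms',
  'k_dpm_2',
  'k_dpm_2_a',
  'k_dpmpp_2',
  'k_dpmpp_2_a',
  'k_euler',
  'k_euler_a',
  'k_heun',
] as const;

// The union is derived from the array's element type, so adding a
// sampler such as 'k_dpmpp_2' in one place updates both declarations.
export type Sampler = (typeof SAMPLERS)[number];
```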
@@ -42,7 +42,6 @@ const makeSocketIOEmitters = (
       options: optionsState,
       system: systemState,
       canvas: canvasState,
-      gallery: galleryState,
     } = state;

     const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =
@@ -55,13 +54,6 @@ const makeSocketIOEmitters = (

     dispatch(generationRequested());

-    if (!['txt2img', 'img2img'].includes(generationMode)) {
-      if (!galleryState.currentImage?.url) return;
-
-      frontendToBackendParametersConfig.imageToProcessUrl =
-        galleryState.currentImage.url;
-    }
-
     const { generationParameters, esrganParameters, facetoolParameters } =
       frontendToBackendParameters(frontendToBackendParametersConfig);
@@ -61,17 +61,17 @@ const makeSocketIOListeners = (
      dispatch(requestSystemConfig());
      const gallery: GalleryState = getState().gallery;

-     if (gallery.categories.user.latest_mtime) {
-       dispatch(requestNewImages('user'));
-     } else {
-       dispatch(requestImages('user'));
-     }
-
      if (gallery.categories.result.latest_mtime) {
        dispatch(requestNewImages('result'));
      } else {
        dispatch(requestImages('result'));
      }
+
+     if (gallery.categories.user.latest_mtime) {
+       dispatch(requestNewImages('user'));
+     } else {
+       dispatch(requestImages('user'));
+     }
    } catch (e) {
      console.error(e);
    }
@@ -30,13 +30,7 @@ export const frontendToBackendParameters = (
): { [key: string]: any } => {
  const canvasBaseLayer = getCanvasBaseLayer();

-  const {
-    generationMode,
-    optionsState,
-    canvasState,
-    systemState,
-    imageToProcessUrl,
-  } = config;
+  const { generationMode, optionsState, canvasState, systemState } = config;

  const {
    cfgScale,
@@ -81,8 +75,7 @@ export const frontendToBackendParameters = (

  const generationParameters: { [k: string]: any } = {
    prompt,
-    iterations:
-      shouldRandomizeSeed || shouldGenerateVariations ? iterations : 1,
+    iterations,
    steps,
    cfg_scale: cfgScale,
    threshold,
@@ -98,6 +91,9 @@ export const frontendToBackendParameters = (
    init_mask: '',
  };

+  let esrganParameters: false | { [k: string]: any } = false;
+  let facetoolParameters: false | { [k: string]: any } = false;
+
  generationParameters.seed = shouldRandomizeSeed
    ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
    : seed;
@@ -106,6 +102,23 @@ export const frontendToBackendParameters = (
  if (['txt2img', 'img2img'].includes(generationMode)) {
    generationParameters.seamless = seamless;
    generationParameters.hires_fix = hiresFix;
+
+    if (shouldRunESRGAN) {
+      esrganParameters = {
+        level: upscalingLevel,
+        strength: upscalingStrength,
+      };
+    }
+
+    if (shouldRunFacetool) {
+      facetoolParameters = {
+        type: facetoolType,
+        strength: facetoolStrength,
+      };
+      if (facetoolType === 'codeformer') {
+        facetoolParameters.codeformer_fidelity = codeformerFidelity;
+      }
+    }
  }

  // img2img exclusive parameters
@@ -145,7 +158,6 @@ export const frontendToBackendParameters = (

  generationParameters.fit = false;

-  generationParameters.init_img = imageToProcessUrl;
  generationParameters.strength = img2imgStrength;

  generationParameters.invert_mask = shouldPreserveMaskedArea;
@@ -209,26 +221,6 @@ export const frontendToBackendParameters = (
    generationParameters.variation_amount = 0;
  }

-  let esrganParameters: false | { [k: string]: any } = false;
-  let facetoolParameters: false | { [k: string]: any } = false;
-
-  if (shouldRunESRGAN) {
-    esrganParameters = {
-      level: upscalingLevel,
-      strength: upscalingStrength,
-    };
-  }
-
-  if (shouldRunFacetool) {
-    facetoolParameters = {
-      type: facetoolType,
-      strength: facetoolStrength,
-    };
-    if (facetoolType === 'codeformer') {
-      facetoolParameters.codeformer_fidelity = codeformerFidelity;
-    }
-  }
-
  if (enableImageDebugging) {
    generationParameters.enable_image_debugging = enableImageDebugging;
  }
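To keep the moves above readable: after this change, the ESRGAN and facetool parameter groups are declared up front as `false` and only filled in inside the txt2img/img2img branch, instead of unconditionally at the end of the function. A reduced sketch of the returned shape (the field names come from the diff; the concrete values are invented for illustration):

```typescript
// Sketch of the three parameter groups handed back to the socket emitter.
type BackendPayload = {
  generationParameters: { [k: string]: any };
  esrganParameters: false | { level: number; strength: number };
  facetoolParameters: false | { type: string; strength: number };
};

// Both post-processing groups stay `false` unless explicitly enabled,
// which is why they are initialized before any branching happens.
const examplePayload: BackendPayload = {
  generationParameters: { prompt: 'a cat', iterations: 1, steps: 50 },
  esrganParameters: false,
  facetoolParameters: { type: 'codeformer', strength: 0.75 },
};
```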
@@ -1,10 +1,12 @@
-import { useAppDispatch } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/store';
import IAIAlertDialog from 'common/components/IAIAlertDialog';
import IAIButton from 'common/components/IAIButton';
import { clearCanvasHistory } from 'features/canvas/store/canvasSlice';
import { FaTrash } from 'react-icons/fa';
+import { isStagingSelector } from '../store/canvasSelectors';

const ClearCanvasHistoryButtonModal = () => {
+  const isStaging = useAppSelector(isStagingSelector);
  const dispatch = useAppDispatch();

  return (
@@ -13,7 +15,7 @@ const ClearCanvasHistoryButtonModal = () => {
      acceptCallback={() => dispatch(clearCanvasHistory())}
      acceptButtonText={'Clear History'}
      triggerComponent={
-        <IAIButton size={'sm'} leftIcon={<FaTrash />}>
+        <IAIButton size={'sm'} leftIcon={<FaTrash />} isDisabled={isStaging}>
          Clear Canvas History
        </IAIButton>
      }
@@ -18,7 +18,6 @@ import useCanvasWheel from '../hooks/useCanvasZoom';
import useCanvasMouseDown from '../hooks/useCanvasMouseDown';
import useCanvasMouseUp from '../hooks/useCanvasMouseUp';
import useCanvasMouseMove from '../hooks/useCanvasMouseMove';
-import useCanvasMouseEnter from '../hooks/useCanvasMouseEnter';
import useCanvasMouseOut from '../hooks/useCanvasMouseOut';
import useCanvasDragMove from '../hooks/useCanvasDragMove';
import IAICanvasObjectRenderer from './IAICanvasObjectRenderer';
@@ -31,6 +30,8 @@ import {
  setCanvasBaseLayer,
  setCanvasStage,
} from '../util/konvaInstanceProvider';
+import { KonvaEventObject } from 'konva/lib/Node';
+import IAICanvasBoundingBoxOverlay from './IAICanvasBoundingBoxOverlay';

const selector = createSelector(
  [canvasSelector, isStagingSelector],
@@ -48,9 +49,10 @@ const selector = createSelector(
      isMovingStage,
      shouldShowIntermediates,
      shouldShowGrid,
+      shouldRestrictStrokesToBox,
    } = canvas;

-    let stageCursor: string | undefined = '';
+    let stageCursor: string | undefined = 'none';

    if (tool === 'move' || isStaging) {
      if (isMovingStage) {
@@ -60,10 +62,8 @@ const selector = createSelector(
      }
    } else if (isTransformingBoundingBox) {
      stageCursor = undefined;
-    } else if (isMouseOverBoundingBox) {
-      stageCursor = 'move';
-    } else {
-      stageCursor = 'none';
+    } else if (shouldRestrictStrokesToBox && !isMouseOverBoundingBox) {
+      stageCursor = 'default';
    }

    return {
@@ -129,7 +129,6 @@ const IAICanvas = () => {
    didMouseMoveRef,
    lastCursorPositionRef
  );
-  const handleMouseEnter = useCanvasMouseEnter(stageRef);
  const handleMouseOut = useCanvasMouseOut();
  const { handleDragStart, handleDragMove, handleDragEnd } =
    useCanvasDragMove();
@@ -153,16 +152,16 @@ const IAICanvas = () => {
        onTouchMove={handleMouseMove}
        onTouchEnd={handleMouseUp}
        onMouseDown={handleMouseDown}
-        onMouseEnter={handleMouseEnter}
+        onMouseLeave={handleMouseOut}
        onMouseMove={handleMouseMove}
        onMouseOut={handleMouseOut}
        onMouseUp={handleMouseUp}
        onDragStart={handleDragStart}
        onDragMove={handleDragMove}
        onDragEnd={handleDragEnd}
        onContextMenu={(e: KonvaEventObject<MouseEvent>) =>
          e.evt.preventDefault()
        }
        onWheel={handleWheel}
-        listening={(tool === 'move' || isStaging) && !isModifyingBoundingBox}
        draggable={(tool === 'move' || isStaging) && !isModifyingBoundingBox}
      >
        <Layer id={'grid'} visible={shouldShowGrid}>
@@ -181,6 +180,9 @@ const IAICanvas = () => {
          <IAICanvasMaskLines visible={true} listening={false} />
          <IAICanvasMaskCompositer listening={false} />
        </Layer>
+        <Layer>
+          <IAICanvasBoundingBoxOverlay />
+        </Layer>
        <Layer id="preview" imageSmoothingEnabled={false}>
          {!isStaging && (
            <IAICanvasToolPreview
@@ -0,0 +1,69 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store';
import _ from 'lodash';
import { Group, Rect } from 'react-konva';
import { canvasSelector } from '../store/canvasSelectors';

const selector = createSelector(
  canvasSelector,
  (canvas) => {
    const {
      boundingBoxCoordinates,
      boundingBoxDimensions,
      stageDimensions,
      stageScale,
      shouldDarkenOutsideBoundingBox,
      stageCoordinates,
    } = canvas;

    return {
      boundingBoxCoordinates,
      boundingBoxDimensions,
      shouldDarkenOutsideBoundingBox,
      stageCoordinates,
      stageDimensions,
      stageScale,
    };
  },
  {
    memoizeOptions: {
      resultEqualityCheck: _.isEqual,
    },
  }
);

const IAICanvasBoundingBoxOverlay = () => {
  const {
    boundingBoxCoordinates,
    boundingBoxDimensions,
    shouldDarkenOutsideBoundingBox,
    stageCoordinates,
    stageDimensions,
    stageScale,
  } = useAppSelector(selector);

  return (
    <Group>
      <Rect
        offsetX={stageCoordinates.x / stageScale}
        offsetY={stageCoordinates.y / stageScale}
        height={stageDimensions.height / stageScale}
        width={stageDimensions.width / stageScale}
        fill={'rgba(0,0,0,0.4)'}
        listening={false}
        visible={shouldDarkenOutsideBoundingBox}
      />
      <Rect
        x={boundingBoxCoordinates.x}
        y={boundingBoxCoordinates.y}
        width={boundingBoxDimensions.width}
        height={boundingBoxDimensions.height}
        fill={'rgb(255,255,255)'}
        listening={false}
        visible={shouldDarkenOutsideBoundingBox}
        globalCompositeOperation={'destination-out'}
      />
    </Group>
  );
};

export default IAICanvasBoundingBoxOverlay;
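The new overlay component relies on canvas compositing: the first `Rect` darkens the whole stage, and the second uses `destination-out` to erase that darkening inside the bounding box. A minimal plain-canvas sketch of the same trick (the element id and box geometry are assumptions for illustration):

```typescript
// Same darken-outside-the-box effect, without Konva.
const canvas = document.getElementById('demo') as HTMLCanvasElement;
const ctx = canvas.getContext('2d')!;

// 1. Darken everything.
ctx.fillStyle = 'rgba(0,0,0,0.4)';
ctx.fillRect(0, 0, canvas.width, canvas.height);

// 2. 'destination-out' removes existing pixels wherever the new shape
//    is drawn, so the bounding-box region ends up un-darkened.
ctx.globalCompositeOperation = 'destination-out';
ctx.fillRect(64, 64, 256, 256); // x, y, width, height of the box
```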
@@ -1,8 +1,13 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store';
import _ from 'lodash';
-import { Group, Line } from 'react-konva';
-import { isCanvasBaseImage, isCanvasBaseLine } from '../store/canvasTypes';
+import { Group, Line, Rect } from 'react-konva';
+import {
+  isCanvasBaseImage,
+  isCanvasBaseLine,
+  isCanvasEraseRect,
+  isCanvasFillRect,
+} from '../store/canvasTypes';
import IAICanvasImage from './IAICanvasImage';
import { rgbaColorToString } from 'features/canvas/util/colorToString';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
@@ -37,7 +42,7 @@ const IAICanvasObjectRenderer = () => {
            <IAICanvasImage key={i} x={obj.x} y={obj.y} url={obj.image.url} />
          );
        } else if (isCanvasBaseLine(obj)) {
-          return (
+          const line = (
            <Line
              key={i}
              points={obj.points}
@@ -53,6 +58,44 @@ const IAICanvasObjectRenderer = () => {
              }
            />
          );
+          if (obj.clip) {
+            return (
+              <Group
+                key={i}
+                clipX={obj.clip.x}
+                clipY={obj.clip.y}
+                clipWidth={obj.clip.width}
+                clipHeight={obj.clip.height}
+              >
+                {line}
+              </Group>
+            );
+          } else {
+            return line;
+          }
+        } else if (isCanvasFillRect(obj)) {
+          return (
+            <Rect
+              key={i}
+              x={obj.x}
+              y={obj.y}
+              width={obj.width}
+              height={obj.height}
+              fill={rgbaColorToString(obj.color)}
+            />
+          );
+        } else if (isCanvasEraseRect(obj)) {
+          return (
+            <Rect
+              key={i}
+              x={obj.x}
+              y={obj.y}
+              width={obj.width}
+              height={obj.height}
+              fill={'rgb(255, 255, 255)'}
+              globalCompositeOperation={'destination-out'}
+            />
+          );
+        }
      })}
    </Group>
@@ -15,6 +15,8 @@ const selector = createSelector(
    },
    shouldShowStagingImage,
    shouldShowStagingOutline,
+    boundingBoxCoordinates: { x, y },
+    boundingBoxDimensions: { width, height },
  } = canvas;

  return {
@@ -24,6 +26,10 @@ const selector = createSelector(
    isOnLastImage: selectedImageIndex === images.length - 1,
    shouldShowStagingImage,
    shouldShowStagingOutline,
+    x,
+    y,
+    width,
+    height,
  };
},
{
@@ -41,19 +47,17 @@ const IAICanvasStagingArea = (props: Props) => {
    currentStagingAreaImage,
    shouldShowStagingImage,
    shouldShowStagingOutline,
-  } = useAppSelector(selector);
-
-  if (!currentStagingAreaImage) return null;
-
-  const {
    x,
    y,
-    image: { width, height, url },
-  } = currentStagingAreaImage;
+    width,
+    height,
+  } = useAppSelector(selector);

  return (
    <Group {...rest}>
-      {shouldShowStagingImage && <IAICanvasImage url={url} x={x} y={y} />}
+      {shouldShowStagingImage && currentStagingAreaImage && (
+        <IAICanvasImage url={currentStagingAreaImage.image.url} x={x} y={y} />
+      )}
      {shouldShowStagingOutline && (
        <Group>
          <Rect
@@ -62,7 +66,7 @@ const IAICanvasStagingArea = (props: Props) => {
            width={width}
            height={height}
            strokeWidth={1}
-            stroke={'black'}
+            stroke={'white'}
            strokeScaleEnabled={false}
          />
          <Rect
@@ -72,7 +76,7 @@ const IAICanvasStagingArea = (props: Props) => {
            height={height}
            dash={[4, 4]}
            strokeWidth={1}
-            stroke={'white'}
+            stroke={'black'}
            strokeScaleEnabled={false}
          />
        </Group>
@@ -10,8 +10,8 @@ import {
  FaCheck,
  FaEye,
  FaEyeSlash,
+  FaPlus,
  FaSave,
-  FaTrash,
} from 'react-icons/fa';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import {
@@ -62,11 +62,11 @@ const IAICanvasStagingAreaToolbar = () => {
  } = useAppSelector(selector);

  const handleMouseOver = useCallback(() => {
-    dispatch(setShouldShowStagingOutline(false));
+    dispatch(setShouldShowStagingOutline(true));
  }, [dispatch]);

  const handleMouseOut = useCallback(() => {
-    dispatch(setShouldShowStagingOutline(true));
+    dispatch(setShouldShowStagingOutline(false));
  }, [dispatch]);

  useHotkeys(
@@ -167,10 +167,11 @@ const IAICanvasStagingAreaToolbar = () => {
        <IAIIconButton
          tooltip="Discard All"
          aria-label="Discard All"
-          icon={<FaTrash />}
+          icon={<FaPlus style={{ transform: 'rotate(45deg)' }} />}
          onClick={() => dispatch(discardStagedImages())}
          data-selected={true}
          style={{ backgroundColor: 'var(--btn-delete-image)' }}
+          fontSize={20}
        />
      </ButtonGroup>
    </Flex>
@@ -15,7 +15,6 @@ const canvasBrushPreviewSelector = createSelector(
  (canvas) => {
    const {
      cursorPosition,
-      stageDimensions: { width, height },
      brushSize,
      colorPickerColor,
      maskColor,
@@ -26,12 +25,64 @@ const canvasBrushPreviewSelector = createSelector(
      isMovingBoundingBox,
      isTransformingBoundingBox,
      stageScale,
+      stageDimensions,
+      boundingBoxCoordinates,
+      boundingBoxDimensions,
+      shouldRestrictStrokesToBox,
    } = canvas;

+    const clip = shouldRestrictStrokesToBox
+      ? {
+          clipX: boundingBoxCoordinates.x,
+          clipY: boundingBoxCoordinates.y,
+          clipWidth: boundingBoxDimensions.width,
+          clipHeight: boundingBoxDimensions.height,
+        }
+      : {};
+
+    // // big brain time; this is the *inverse* of the clip that is needed for shouldRestrictStrokesToBox
+    // // it took some fiddling to work out, so I am leaving it here in case it is needed for something else...
+    // const clipFunc = shouldRestrictStrokesToBox
+    //   ? (ctx: SceneContext) => {
+    //       console.log(
+    //         stageCoordinates.x / stageScale,
+    //         stageCoordinates.y / stageScale,
+    //         stageDimensions.height / stageScale,
+    //         stageDimensions.width / stageScale
+    //       );
+    //       ctx.fillStyle = 'red';
+    //       ctx.rect(
+    //         -stageCoordinates.x / stageScale,
+    //         -stageCoordinates.y / stageScale,
+    //         stageDimensions.width / stageScale,
+    //         stageCoordinates.y / stageScale + boundingBoxCoordinates.y
+    //       );
+    //       ctx.rect(
+    //         -stageCoordinates.x / stageScale,
+    //         boundingBoxCoordinates.y + boundingBoxDimensions.height,
+    //         stageDimensions.width / stageScale,
+    //         stageDimensions.height / stageScale
+    //       );
+    //       ctx.rect(
+    //         -stageCoordinates.x / stageScale,
+    //         -stageCoordinates.y / stageScale,
+    //         stageCoordinates.x / stageScale + boundingBoxCoordinates.x,
+    //         stageDimensions.height / stageScale
+    //       );
+    //       ctx.rect(
+    //         boundingBoxCoordinates.x + boundingBoxDimensions.width,
+    //         -stageCoordinates.y / stageScale,
+    //         stageDimensions.width / stageScale -
+    //           (boundingBoxCoordinates.x + boundingBoxDimensions.width),
+    //         stageDimensions.height / stageScale
+    //       );
+    //     }
+    //   : undefined;
+
    return {
      cursorPosition,
-      width,
-      height,
+      brushX: cursorPosition ? cursorPosition.x : stageDimensions.width / 2,
+      brushY: cursorPosition ? cursorPosition.y : stageDimensions.height / 2,
      radius: brushSize / 2,
      colorPickerOuterRadius: COLOR_PICKER_SIZE / stageScale,
      colorPickerInnerRadius:
@@ -50,6 +101,7 @@ const canvasBrushPreviewSelector = createSelector(
      ) && shouldShowBrush,
      strokeWidth: 1.5 / stageScale,
      dotRadius: 1.5 / stageScale,
+      clip,
    };
  },
  {
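For reference, the `clip` object built above maps directly onto react-konva's `Group` clipping props, which is what the component spread (`{...clip}`) in the next hunk relies on. A minimal sketch with illustrative dimensions (not part of the diff):

```typescript
import { Group, Line } from 'react-konva';

// Anything rendered inside this Group is clipped to the rectangle,
// which is how "Limit Strokes to Box" keeps brush strokes inside the
// bounding box.
const ClippedStrokes = () => (
  <Group clipX={0} clipY={0} clipWidth={512} clipHeight={512}>
    <Line points={[0, 0, 600, 600]} stroke="black" strokeWidth={4} />
  </Group>
);

export default ClippedStrokes;
```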
@@ -65,9 +117,8 @@ const canvasBrushPreviewSelector = createSelector(
const IAICanvasToolPreview = (props: GroupConfig) => {
  const { ...rest } = props;
  const {
-    cursorPosition,
-    width,
-    height,
+    brushX,
+    brushY,
    radius,
    maskColorString,
    tool,
@@ -79,25 +130,26 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
    colorPickerColorString,
    colorPickerInnerRadius,
    colorPickerOuterRadius,
+    clip,
  } = useAppSelector(canvasBrushPreviewSelector);

  if (!shouldDrawBrushPreview) return null;

  return (
-    <Group listening={false} {...rest}>
+    <Group listening={false} {...clip} {...rest}>
      {tool === 'colorPicker' ? (
        <>
          <Circle
-            x={cursorPosition ? cursorPosition.x : width / 2}
-            y={cursorPosition ? cursorPosition.y : height / 2}
+            x={brushX}
+            y={brushY}
            radius={colorPickerOuterRadius}
            stroke={brushColorString}
            strokeWidth={COLOR_PICKER_STROKE_RADIUS}
            strokeScaleEnabled={false}
          />
          <Circle
-            x={cursorPosition ? cursorPosition.x : width / 2}
-            y={cursorPosition ? cursorPosition.y : height / 2}
+            x={brushX}
+            y={brushY}
            radius={colorPickerInnerRadius}
            stroke={colorPickerColorString}
            strokeWidth={COLOR_PICKER_STROKE_RADIUS}
@@ -107,17 +159,17 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
      ) : (
        <>
          <Circle
-            x={cursorPosition ? cursorPosition.x : width / 2}
-            y={cursorPosition ? cursorPosition.y : height / 2}
+            x={brushX}
+            y={brushY}
            radius={radius}
            fill={layer === 'mask' ? maskColorString : brushColorString}
            globalCompositeOperation={
-              tool === 'eraser' ? 'destination-out' : 'source-over'
+              tool === 'eraser' ? 'destination-out' : 'source-out'
            }
          />
          <Circle
-            x={cursorPosition ? cursorPosition.x : width / 2}
-            y={cursorPosition ? cursorPosition.y : height / 2}
+            x={brushX}
+            y={brushY}
            radius={radius}
            stroke={'rgba(255,255,255,0.4)'}
            strokeWidth={strokeWidth * 2}
@@ -125,8 +177,8 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
            listening={false}
          />
          <Circle
-            x={cursorPosition ? cursorPosition.x : width / 2}
-            y={cursorPosition ? cursorPosition.y : height / 2}
+            x={brushX}
+            y={brushY}
            radius={radius}
            stroke={'rgba(0,0,0,1)'}
            strokeWidth={strokeWidth}
@@ -136,15 +188,15 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
        </>
      )}
      <Circle
-        x={cursorPosition ? cursorPosition.x : width / 2}
-        y={cursorPosition ? cursorPosition.y : height / 2}
+        x={brushX}
+        y={brushY}
        radius={dotRadius * 2}
        fill={'rgba(255,255,255,0.4)'}
        listening={false}
      />
      <Circle
-        x={cursorPosition ? cursorPosition.x : width / 2}
-        y={cursorPosition ? cursorPosition.y : height / 2}
+        x={brushX}
+        y={brushY}
        radius={dotRadius}
        fill={'rgba(0,0,0,1)'}
        listening={false}
@@ -3,7 +3,7 @@ import Konva from 'konva';
import { KonvaEventObject } from 'konva/lib/Node';
import { Vector2d } from 'konva/lib/types';
import _ from 'lodash';
-import { useCallback, useEffect, useRef } from 'react';
+import { useCallback, useEffect, useRef, useState } from 'react';
import { Group, Rect, Transformer } from 'react-konva';
import { useAppDispatch, useAppSelector } from 'app/store';
import {
@@ -26,15 +26,11 @@ const boundingBoxPreviewSelector = createSelector(
    const {
      boundingBoxCoordinates,
      boundingBoxDimensions,
-      stageDimensions,
      stageScale,
      isDrawing,
      isTransformingBoundingBox,
      isMovingBoundingBox,
-      isMouseOverBoundingBox,
-      shouldDarkenOutsideBoundingBox,
      tool,
-      stageCoordinates,
      shouldSnapToGrid,
    } = canvas;

@@ -42,16 +38,11 @@ const boundingBoxPreviewSelector = createSelector(
      boundingBoxCoordinates,
      boundingBoxDimensions,
      isDrawing,
-      isMouseOverBoundingBox,
-      shouldDarkenOutsideBoundingBox,
      isMovingBoundingBox,
      isTransformingBoundingBox,
-      stageDimensions,
      stageScale,
      shouldSnapToGrid,
      tool,
-      stageCoordinates,
-      boundingBoxStrokeWidth: (isMouseOverBoundingBox ? 8 : 1) / stageScale,
      hitStrokeWidth: 20 / stageScale,
    };
  },
@@ -72,22 +63,20 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
    boundingBoxCoordinates,
    boundingBoxDimensions,
    isDrawing,
-    isMouseOverBoundingBox,
-    shouldDarkenOutsideBoundingBox,
    isMovingBoundingBox,
    isTransformingBoundingBox,
-    stageCoordinates,
-    stageDimensions,
    stageScale,
    shouldSnapToGrid,
    tool,
-    boundingBoxStrokeWidth,
    hitStrokeWidth,
  } = useAppSelector(boundingBoxPreviewSelector);

  const transformerRef = useRef<Konva.Transformer>(null);
  const shapeRef = useRef<Konva.Rect>(null);

+  const [isMouseOverBoundingBoxOutline, setIsMouseOverBoundingBoxOutline] =
+    useState(false);
+
  useEffect(() => {
    if (!transformerRef.current || !shapeRef.current) return;
    transformerRef.current.nodes([shapeRef.current]);
@@ -205,7 +194,9 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {

  const handleEndedTransforming = () => {
    dispatch(setIsTransformingBoundingBox(false));
    dispatch(setIsMovingBoundingBox(false));
+    dispatch(setIsMouseOverBoundingBox(false));
+    setIsMouseOverBoundingBoxOutline(false);
  };

  const handleStartedMoving = () => {
@@ -216,38 +207,38 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
    dispatch(setIsTransformingBoundingBox(false));
    dispatch(setIsMovingBoundingBox(false));
    dispatch(setIsMouseOverBoundingBox(false));
+    setIsMouseOverBoundingBoxOutline(false);
  };

  const handleMouseOver = () => {
    dispatch(setIsMouseOverBoundingBox(true));
+    setIsMouseOverBoundingBoxOutline(true);
  };

  const handleMouseOut = () => {
    !isTransformingBoundingBox &&
      !isMovingBoundingBox &&
      dispatch(setIsMouseOverBoundingBox(false));
+    setIsMouseOverBoundingBoxOutline(false);
  };

+  const handleMouseEnterBoundingBox = () => {
+    dispatch(setIsMouseOverBoundingBox(true));
+  };
+
+  const handleMouseLeaveBoundingBox = () => {
+    dispatch(setIsMouseOverBoundingBox(false));
+  };
+
  return (
    <Group {...rest}>
-      <Rect
-        offsetX={stageCoordinates.x / stageScale}
-        offsetY={stageCoordinates.y / stageScale}
-        height={stageDimensions.height / stageScale}
-        width={stageDimensions.width / stageScale}
-        fill={'rgba(0,0,0,0.4)'}
-        listening={false}
-        visible={shouldDarkenOutsideBoundingBox}
-      />
      <Rect
-        height={boundingBoxDimensions.height}
-        width={boundingBoxDimensions.width}
        x={boundingBoxCoordinates.x}
        y={boundingBoxCoordinates.y}
+        width={boundingBoxDimensions.width}
+        height={boundingBoxDimensions.height}
        fill={'rgb(255,255,255)'}
        listening={false}
        visible={shouldDarkenOutsideBoundingBox}
        globalCompositeOperation={'destination-out'}
+        onMouseEnter={handleMouseEnterBoundingBox}
+        onMouseOver={handleMouseEnterBoundingBox}
+        onMouseLeave={handleMouseLeaveBoundingBox}
+        onMouseOut={handleMouseLeaveBoundingBox}
      />
      <Rect
        draggable={true}
@@ -255,17 +246,21 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
        height={boundingBoxDimensions.height}
        hitStrokeWidth={hitStrokeWidth}
        listening={!isDrawing && tool === 'move'}
        onDragStart={handleStartedMoving}
        onDragEnd={handleEndedModifying}
        onDragMove={handleOnDragMove}
        onMouseDown={handleStartedMoving}
        onMouseOut={handleMouseOut}
        onMouseOver={handleMouseOver}
+        onMouseEnter={handleMouseOver}
        onMouseUp={handleEndedModifying}
        onTransform={handleOnTransform}
        onTransformEnd={handleEndedTransforming}
        ref={shapeRef}
-        stroke={isMouseOverBoundingBox ? 'rgba(255,255,255,0.7)' : 'white'}
-        strokeWidth={boundingBoxStrokeWidth}
+        stroke={
+          isMouseOverBoundingBoxOutline ? 'rgba(255,255,255,0.7)' : 'white'
+        }
+        strokeWidth={(isMouseOverBoundingBoxOutline ? 8 : 1) / stageScale}
        width={boundingBoxDimensions.width}
        x={boundingBoxCoordinates.x}
        y={boundingBoxCoordinates.y}
@@ -285,6 +280,7 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
        ignoreStroke={true}
        keepRatio={false}
        listening={!isDrawing && tool === 'move'}
+        onDragStart={handleStartedMoving}
        onDragEnd={handleEndedModifying}
        onMouseDown={handleStartedTransforming}
        onMouseUp={handleEndedTransforming}
@@ -15,13 +15,16 @@ import IAIPopover from 'common/components/IAIPopover';
import IAICheckbox from 'common/components/IAICheckbox';
import IAIColorPicker from 'common/components/IAIColorPicker';
import IAIButton from 'common/components/IAIButton';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
+import {
+  canvasSelector,
+  isStagingSelector,
+} from 'features/canvas/store/canvasSelectors';
import { useHotkeys } from 'react-hotkeys-hook';
import { rgbaColorToString } from 'features/canvas/util/colorToString';

export const selector = createSelector(
-  [canvasSelector],
-  (canvas) => {
+  [canvasSelector, isStagingSelector],
+  (canvas, isStaging) => {
    const { maskColor, layer, isMaskEnabled, shouldPreserveMaskedArea } =
      canvas;
@@ -31,6 +34,7 @@ export const selector = createSelector(
      maskColorString: rgbaColorToString(maskColor),
      isMaskEnabled,
      shouldPreserveMaskedArea,
+      isStaging,
    };
  },
  {
@@ -41,8 +45,13 @@ export const selector = createSelector(
);
const IAICanvasMaskOptions = () => {
  const dispatch = useAppDispatch();
-  const { layer, maskColor, isMaskEnabled, shouldPreserveMaskedArea } =
-    useAppSelector(selector);
+  const {
+    layer,
+    maskColor,
+    isMaskEnabled,
+    shouldPreserveMaskedArea,
+    isStaging,
+  } = useAppSelector(selector);

  useHotkeys(
    ['q'],
@@ -50,7 +59,7 @@ const IAICanvasMaskOptions = () => {
      handleToggleMaskLayer();
    },
    {
-      enabled: () => true,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [layer]
@@ -62,7 +71,7 @@ const IAICanvasMaskOptions = () => {
      handleClearMask();
    },
    {
-      enabled: () => true,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    []
@@ -74,7 +83,7 @@ const IAICanvasMaskOptions = () => {
      handleToggleEnableMask();
    },
    {
-      enabled: () => true,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [isMaskEnabled]
@@ -103,6 +112,7 @@ const IAICanvasMaskOptions = () => {
            ? { backgroundColor: 'var(--accent-color)' }
            : { backgroundColor: 'var(--btn-base-color)' }
        }
+        isDisabled={isStaging}
      />
    </ButtonGroup>
  }
@@ -8,14 +8,15 @@ import { canvasSelector } from 'features/canvas/store/canvasSelectors';

import _ from 'lodash';
import { redo } from 'features/canvas/store/canvasSlice';
+import { systemSelector } from 'features/system/store/systemSelectors';

const canvasRedoSelector = createSelector(
-  [canvasSelector, activeTabNameSelector],
-  (canvas, activeTabName) => {
+  [canvasSelector, activeTabNameSelector, systemSelector],
+  (canvas, activeTabName, system) => {
    const { futureLayerStates } = canvas;

    return {
-      canRedo: futureLayerStates.length > 0,
+      canRedo: futureLayerStates.length > 0 && !system.isProcessing,
      activeTabName,
    };
  },
@@ -4,6 +4,7 @@ import {
  setShouldAutoSave,
  setShouldCropToBoundingBoxOnSave,
  setShouldDarkenOutsideBoundingBox,
+  setShouldRestrictStrokesToBox,
  setShouldShowCanvasDebugInfo,
  setShouldShowGrid,
  setShouldShowIntermediates,
@@ -32,6 +33,7 @@ export const canvasControlsSelector = createSelector(
    shouldShowGrid,
    shouldShowIntermediates,
    shouldSnapToGrid,
+    shouldRestrictStrokesToBox,
  } = canvas;

  return {
@@ -42,6 +44,7 @@ export const canvasControlsSelector = createSelector(
    shouldShowGrid,
    shouldShowIntermediates,
    shouldSnapToGrid,
+    shouldRestrictStrokesToBox,
  };
},
{
@@ -61,6 +64,7 @@ const IAICanvasSettingsButtonPopover = () => {
    shouldShowGrid,
    shouldShowIntermediates,
    shouldSnapToGrid,
+    shouldRestrictStrokesToBox,
  } = useAppSelector(canvasControlsSelector);

  useHotkeys(
@@ -126,6 +130,13 @@ const IAICanvasSettingsButtonPopover = () => {
          dispatch(setShouldCropToBoundingBoxOnSave(e.target.checked))
        }
      />
+      <IAICheckbox
+        label="Limit Strokes to Box"
+        isChecked={shouldRestrictStrokesToBox}
+        onChange={(e) =>
+          dispatch(setShouldRestrictStrokesToBox(e.target.checked))
+        }
+      />
      <IAICheckbox
        label="Show Canvas Debug Info"
        isChecked={shouldShowCanvasDebugInfo}
@@ -1,6 +1,8 @@
|
||||
import { ButtonGroup, Flex } from '@chakra-ui/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import {
|
||||
addEraseRect,
|
||||
addFillRect,
|
||||
setBrushColor,
|
||||
setBrushSize,
|
||||
setTool,
|
||||
@@ -11,7 +13,9 @@ import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import {
|
||||
FaEraser,
|
||||
FaEyeDropper,
|
||||
FaFillDrip,
|
||||
FaPaintBrush,
|
||||
FaPlus,
|
||||
FaSlidersH,
|
||||
} from 'react-icons/fa';
|
||||
import {
|
||||
@@ -55,7 +59,7 @@ const IAICanvasToolChooserOptions = () => {
|
||||
handleSelectBrushTool();
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[]
|
||||
@@ -67,7 +71,7 @@ const IAICanvasToolChooserOptions = () => {
|
||||
handleSelectEraserTool();
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[tool]
|
||||
@@ -79,19 +83,41 @@ const IAICanvasToolChooserOptions = () => {
|
||||
handleSelectColorPickerTool();
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[tool]
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
['shift+f'],
|
||||
() => {
|
||||
handleFillRect();
|
||||
},
|
||||
{
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
}
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
['delete', 'backspace'],
|
||||
() => {
|
||||
handleEraseBoundingBox();
|
||||
},
|
||||
{
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
}
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
['BracketLeft'],
|
||||
() => {
|
||||
dispatch(setBrushSize(Math.max(brushSize - 5, 5)));
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[brushSize]
|
||||
@@ -103,7 +129,7 @@ const IAICanvasToolChooserOptions = () => {
|
||||
dispatch(setBrushSize(Math.min(brushSize + 5, 500)));
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[brushSize]
|
||||
@@ -120,7 +146,7 @@ const IAICanvasToolChooserOptions = () => {
|
||||
);
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[brushColor]
|
||||
@@ -137,7 +163,7 @@ const IAICanvasToolChooserOptions = () => {
|
||||
);
|
||||
},
|
||||
{
|
||||
enabled: () => true,
|
||||
enabled: () => !isStaging,
|
||||
preventDefault: true,
|
||||
},
|
||||
[brushColor]
|
||||
@@ -146,6 +172,8 @@ const IAICanvasToolChooserOptions = () => {
|
||||
const handleSelectBrushTool = () => dispatch(setTool('brush'));
|
||||
const handleSelectEraserTool = () => dispatch(setTool('eraser'));
|
||||
const handleSelectColorPickerTool = () => dispatch(setTool('colorPicker'));
|
||||
const handleFillRect = () => dispatch(addFillRect());
|
||||
const handleEraseBoundingBox = () => dispatch(addEraseRect());
|
||||
|
||||
return (
|
||||
<ButtonGroup isAttached>
|
||||
@@ -165,6 +193,20 @@ const IAICanvasToolChooserOptions = () => {
|
||||
isDisabled={isStaging}
|
||||
onClick={handleSelectEraserTool}
|
||||
/>
|
||||
<IAIIconButton
|
||||
aria-label="Fill Bounding Box (Shift+F)"
|
||||
tooltip="Fill Bounding Box (Shift+F)"
|
||||
icon={<FaFillDrip />}
|
||||
isDisabled={isStaging}
|
||||
onClick={handleFillRect}
|
||||
/>
|
||||
<IAIIconButton
|
||||
aria-label="Erase Bounding Box Area (Delete/Backspace)"
|
||||
tooltip="Erase Bounding Box Area (Delete/Backspace)"
|
||||
icon={<FaPlus style={{ transform: 'rotate(45deg)' }} />}
|
||||
isDisabled={isStaging}
|
||||
onClick={handleEraseBoundingBox}
|
||||
/>
|
||||
<IAIIconButton
|
||||
aria-label="Color Picker (C)"
|
||||
tooltip="Color Picker (C)"
|
||||
|
||||
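Nearly every hunk in this file makes the same substitution: `enabled: () => true` becomes `enabled: () => !isStaging`. The recurring shape, extracted as a sketch for clarity (the wrapper name is invented; the real code calls `useHotkeys` inline as shown above):

```typescript
import { useHotkeys } from 'react-hotkeys-hook';

// Hypothetical helper illustrating the pattern: a canvas hotkey that
// is suppressed while the staging area is active.
const useStagingAwareHotkey = (
  keys: string,
  handler: () => void,
  isStaging: boolean
) => {
  useHotkeys(
    keys,
    handler,
    { enabled: () => !isStaging, preventDefault: true },
    [handler, isStaging]
  );
};

export default useStagingAwareHotkey;
```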
@@ -85,7 +85,7 @@ const IAICanvasOutpaintingControls = () => {
      handleSelectMoveTool();
    },
    {
-      enabled: () => true,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    []
@@ -109,7 +109,7 @@ const IAICanvasOutpaintingControls = () => {
      handleMergeVisible();
    },
    {
-      enabled: () => !isProcessing,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [canvasBaseLayer, isProcessing]
@@ -121,7 +121,7 @@ const IAICanvasOutpaintingControls = () => {
      handleSaveToGallery();
    },
    {
-      enabled: () => !isProcessing,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [canvasBaseLayer, isProcessing]
@@ -133,7 +133,7 @@ const IAICanvasOutpaintingControls = () => {
      handleCopyImageToClipboard();
    },
    {
-      enabled: () => !isProcessing,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [canvasBaseLayer, isProcessing]
@@ -145,7 +145,7 @@ const IAICanvasOutpaintingControls = () => {
      handleDownloadAsImage();
    },
    {
-      enabled: () => !isProcessing,
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [canvasBaseLayer, isProcessing]
@@ -226,6 +226,7 @@ const IAICanvasOutpaintingControls = () => {
        value={layer}
        validValues={LAYER_NAMES_DICT}
        onChange={handleChangeLayer}
+        isDisabled={isStaging}
      />

      <IAICanvasMaskOptions />
@@ -253,28 +254,28 @@ const IAICanvasOutpaintingControls = () => {
        tooltip="Merge Visible (Shift+M)"
        icon={<FaLayerGroup />}
        onClick={handleMergeVisible}
-        isDisabled={isProcessing}
+        isDisabled={isStaging}
      />
      <IAIIconButton
        aria-label="Save to Gallery (Shift+S)"
        tooltip="Save to Gallery (Shift+S)"
        icon={<FaSave />}
        onClick={handleSaveToGallery}
-        isDisabled={isProcessing}
+        isDisabled={isStaging}
      />
      <IAIIconButton
        aria-label="Copy to Clipboard (Cmd/Ctrl+C)"
        tooltip="Copy to Clipboard (Cmd/Ctrl+C)"
        icon={<FaCopy />}
        onClick={handleCopyImageToClipboard}
-        isDisabled={isProcessing}
+        isDisabled={isStaging}
      />
      <IAIIconButton
        aria-label="Download as Image (Shift+D)"
        tooltip="Download as Image (Shift+D)"
        icon={<FaDownload />}
        onClick={handleDownloadAsImage}
-        isDisabled={isProcessing}
+        isDisabled={isStaging}
      />
    </ButtonGroup>
    <ButtonGroup isAttached>
@@ -288,6 +289,7 @@ const IAICanvasOutpaintingControls = () => {
        tooltip="Upload"
        icon={<FaUpload />}
        onClick={openUploader}
+        isDisabled={isStaging}
      />
      <IAIIconButton
        aria-label="Clear Canvas"
@@ -295,6 +297,7 @@ const IAICanvasOutpaintingControls = () => {
        icon={<FaTrash />}
        onClick={handleResetCanvas}
        style={{ backgroundColor: 'var(--btn-delete-image)' }}
+        isDisabled={isStaging}
      />
    </ButtonGroup>
    <ButtonGroup isAttached>
@@ -8,14 +8,15 @@ import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import _ from 'lodash';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { undo } from 'features/canvas/store/canvasSlice';
+import { systemSelector } from 'features/system/store/systemSelectors';

const canvasUndoSelector = createSelector(
-  [canvasSelector, activeTabNameSelector],
-  (canvas, activeTabName) => {
+  [canvasSelector, activeTabNameSelector, systemSelector],
+  (canvas, activeTabName, system) => {
    const { pastLayerStates } = canvas;

    return {
-      canUndo: pastLayerStates.length > 0,
+      canUndo: pastLayerStates.length > 0 && !system.isProcessing,
      activeTabName,
    };
  },
@@ -3,7 +3,10 @@ import { useAppDispatch, useAppSelector } from 'app/store';
import { KonvaEventObject } from 'konva/lib/Node';
import _ from 'lodash';
import { useCallback } from 'react';
-import { canvasSelector, isStagingSelector } from 'features/canvas/store/canvasSelectors';
+import {
+  canvasSelector,
+  isStagingSelector,
+} from 'features/canvas/store/canvasSelectors';
import {
  setIsMovingStage,
  setStageCoordinates,
@@ -12,10 +15,11 @@ import {
const selector = createSelector(
  [canvasSelector, isStagingSelector],
  (canvas, isStaging) => {
-    const { tool } = canvas;
+    const { tool, isMovingBoundingBox } = canvas;
    return {
      tool,
      isStaging,
+      isMovingBoundingBox,
    };
  },
  { memoizeOptions: { resultEqualityCheck: _.isEqual } }
@@ -23,29 +27,29 @@ const selector = createSelector(

const useCanvasDrag = () => {
  const dispatch = useAppDispatch();
-  const { tool, isStaging } = useAppSelector(selector);
+  const { tool, isStaging, isMovingBoundingBox } = useAppSelector(selector);

  return {
    handleDragStart: useCallback(() => {
-      if (!(tool === 'move' || isStaging)) return;
+      if (!((tool === 'move' || isStaging) && !isMovingBoundingBox)) return;
      dispatch(setIsMovingStage(true));
-    }, [dispatch, isStaging, tool]),
+    }, [dispatch, isMovingBoundingBox, isStaging, tool]),

    handleDragMove: useCallback(
      (e: KonvaEventObject<MouseEvent>) => {
-        if (!(tool === 'move' || isStaging)) return;
+        if (!((tool === 'move' || isStaging) && !isMovingBoundingBox)) return;

        const newCoordinates = { x: e.target.x(), y: e.target.y() };

        dispatch(setStageCoordinates(newCoordinates));
      },
-      [dispatch, isStaging, tool]
+      [dispatch, isMovingBoundingBox, isStaging, tool]
    ),

    handleDragEnd: useCallback(() => {
-      if (!(tool === 'move' || isStaging)) return;
+      if (!((tool === 'move' || isStaging) && !isMovingBoundingBox)) return;
      dispatch(setIsMovingStage(false));
-    }, [dispatch, isStaging, tool]),
+    }, [dispatch, isMovingBoundingBox, isStaging, tool]),
  };
};
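All three drag handlers above share one gating condition. Factored out as a sketch for readability (the function name is invented; the diff keeps the expression inline):

```typescript
// The stage may be dragged only in move mode or while staging, and
// never while the bounding box itself is being moved.
const canDragStage = (
  tool: string,
  isStaging: boolean,
  isMovingBoundingBox: boolean
): boolean => (tool === 'move' || isStaging) && !isMovingBoundingBox;

export default canDragStage;
```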
@@ -9,13 +9,16 @@ import {
} from 'features/canvas/store/canvasSlice';
import { useAppDispatch, useAppSelector } from 'app/store';
import { useRef } from 'react';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
+import {
+  canvasSelector,
+  isStagingSelector,
+} from 'features/canvas/store/canvasSelectors';
import { CanvasTool } from '../store/canvasTypes';
import { getCanvasStage } from '../util/konvaInstanceProvider';

const selector = createSelector(
-  [canvasSelector, activeTabNameSelector],
-  (canvas, activeTabName) => {
+  [canvasSelector, activeTabNameSelector, isStagingSelector],
+  (canvas, activeTabName, isStaging) => {
    const {
      cursorPosition,
      shouldLockBoundingBox,
@@ -29,6 +32,7 @@ const selector = createSelector(
      shouldLockBoundingBox,
      shouldShowBoundingBox,
      tool,
+      isStaging,
    };
  },
  {
@@ -40,7 +44,7 @@ const selector = createSelector(

const useInpaintingCanvasHotkeys = () => {
  const dispatch = useAppDispatch();
-  const { activeTabName, shouldShowBoundingBox, tool } =
+  const { activeTabName, shouldShowBoundingBox, tool, isStaging } =
    useAppSelector(selector);

  const previousToolRef = useRef<CanvasTool | null>(null);
@@ -64,6 +68,7 @@ const useInpaintingCanvasHotkeys = () => {
      dispatch(setShouldShowBoundingBox(!shouldShowBoundingBox));
    },
    {
+      enabled: () => !isStaging,
      preventDefault: true,
    },
    [activeTabName, shouldShowBoundingBox]
@@ -1,53 +0,0 @@
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
-import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
-import Konva from 'konva';
-import { KonvaEventObject } from 'konva/lib/Node';
-import _ from 'lodash';
-import { MutableRefObject, useCallback } from 'react';
-import { canvasSelector, isStagingSelector } from 'features/canvas/store/canvasSelectors';
-import {
-  addLine,
-  setIsDrawing,
-} from 'features/canvas/store/canvasSlice';
-import getScaledCursorPosition from '../util/getScaledCursorPosition';
-
-const selector = createSelector(
-  [activeTabNameSelector, canvasSelector, isStagingSelector],
-  (activeTabName, canvas, isStaging) => {
-    const { tool } = canvas;
-    return {
-      tool,
-      activeTabName,
-      isStaging,
-    };
-  },
-  { memoizeOptions: { resultEqualityCheck: _.isEqual } }
-);
-
-const useCanvasMouseEnter = (
-  stageRef: MutableRefObject<Konva.Stage | null>
-) => {
-  const dispatch = useAppDispatch();
-  const { tool, isStaging } = useAppSelector(selector);
-
-  return useCallback(
-    (e: KonvaEventObject<MouseEvent>) => {
-      if (e.evt.buttons !== 1) return;
-
-      if (!stageRef.current) return;
-
-      const scaledCursorPosition = getScaledCursorPosition(stageRef.current);
-
-      if (!scaledCursorPosition || tool === 'move' || isStaging) return;
-
-      dispatch(setIsDrawing(true));
-
-      // Add a new line starting from the current cursor position.
-      dispatch(addLine([scaledCursorPosition.x, scaledCursorPosition.y]));
-    },
-    [stageRef, tool, isStaging, dispatch]
-  );
-};
-
-export default useCanvasMouseEnter;
@@ -1,13 +1,12 @@
import { useAppDispatch } from 'app/store';
import { useCallback } from 'react';
import { setCursorPosition, setIsDrawing } from 'features/canvas/store/canvasSlice';
import { mouseLeftCanvas } from 'features/canvas/store/canvasSlice';

const useCanvasMouseOut = () => {
const dispatch = useAppDispatch();

return useCallback(() => {
dispatch(setCursorPosition(null));
dispatch(setIsDrawing(false));
dispatch(mouseLeftCanvas());
}, [dispatch]);
};

@@ -1,10 +1,17 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { systemSelector } from 'features/system/store/systemSelectors';
import { CanvasImage, CanvasState, isCanvasBaseImage } from './canvasTypes';

export const canvasSelector = (state: RootState): CanvasState => state.canvas;

export const isStagingSelector = (state: RootState): boolean =>
state.canvas.layerState.stagingArea.images.length > 0;
export const isStagingSelector = createSelector(
[canvasSelector, activeTabNameSelector, systemSelector],
(canvas, activeTabName, system) =>
canvas.layerState.stagingArea.images.length > 0 ||
(activeTabName === 'unifiedCanvas' && system.isProcessing)
);

export const initialCanvasImageSelector = (
state: RootState

@@ -12,12 +12,15 @@ import calculateCoordinates from '../util/calculateCoordinates';
import calculateScale from '../util/calculateScale';
import { STAGE_PADDING_PERCENTAGE } from '../util/constants';
import floorCoordinates from '../util/floorCoordinates';
import getScaledBoundingBoxDimensions from '../util/getScaledBoundingBoxDimensions';
import roundDimensionsTo64 from '../util/roundDimensionsTo64';
import {
BoundingBoxScale,
CanvasImage,
CanvasLayer,
CanvasLayerState,
CanvasBaseLine,
CanvasMaskLine,
CanvasState,
CanvasTool,
Dimensions,
@@ -29,10 +32,6 @@ import {
export const initialLayerState: CanvasLayerState = {
objects: [],
stagingArea: {
x: -1,
y: -1,
width: -1,
height: -1,
images: [],
selectedImageIndex: -1,
},
@@ -72,6 +71,7 @@ const initialCanvasState: CanvasState = {
shouldDarkenOutsideBoundingBox: false,
shouldLockBoundingBox: false,
shouldPreserveMaskedArea: false,
shouldRestrictStrokesToBox: true,
shouldShowBoundingBox: true,
shouldShowBrush: true,
shouldShowBrushPreview: false,
@@ -122,7 +122,7 @@ export const canvasSlice = createSlice({
state.brushSize = action.payload;
},
clearMask: (state) => {
state.pastLayerStates.push({ ...state.layerState });
state.pastLayerStates.push(_.cloneDeep(state.layerState));
state.layerState.objects = state.layerState.objects.filter(
(obj) => !isCanvasMaskLine(obj)
);
@@ -177,10 +177,17 @@ export const canvasSlice = createSlice({
),
};

if (state.boundingBoxScaleMethod === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
newBoundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}

state.boundingBoxDimensions = newBoundingBoxDimensions;
state.boundingBoxCoordinates = newBoundingBoxCoordinates;

state.pastLayerStates.push(state.layerState);
state.pastLayerStates.push(_.cloneDeep(state.layerState));

state.layerState = {
...initialLayerState,
@@ -220,65 +227,13 @@ export const canvasSlice = createSlice({
state.stageCoordinates = newCoordinates;
state.doesCanvasNeedScaling = true;
},
setStageDimensions: (state, action: PayloadAction<Dimensions>) => {
state.stageDimensions = action.payload;

const { width: canvasWidth, height: canvasHeight } = action.payload;

const { width: boundingBoxWidth, height: boundingBoxHeight } =
state.boundingBoxDimensions;

const newBoundingBoxWidth = roundDownToMultiple(
_.clamp(boundingBoxWidth, 64, canvasWidth / state.stageScale),
64
);
const newBoundingBoxHeight = roundDownToMultiple(
_.clamp(boundingBoxHeight, 64, canvasHeight / state.stageScale),
64
);

state.boundingBoxDimensions = {
width: newBoundingBoxWidth,
height: newBoundingBoxHeight,
};
},
setBoundingBoxDimensions: (state, action: PayloadAction<Dimensions>) => {
const newDimensions = roundDimensionsTo64(action.payload);
state.boundingBoxDimensions = newDimensions;

if (state.boundingBoxScaleMethod === 'auto') {
const { width, height } = newDimensions;
const newScaledDimensions = { width, height };
const targetArea = 512 * 512;
const aspectRatio = width / height;
let currentArea = width * height;
let maxDimension = 448;
while (currentArea < targetArea) {
maxDimension += 64;
if (width === height) {
newScaledDimensions.width = 512;
newScaledDimensions.height = 512;
break;
} else {
if (aspectRatio > 1) {
newScaledDimensions.width = maxDimension;
newScaledDimensions.height = roundToMultiple(
maxDimension / aspectRatio,
64
);
} else if (aspectRatio < 1) {
newScaledDimensions.height = maxDimension;
newScaledDimensions.width = roundToMultiple(
maxDimension * aspectRatio,
64
);
}
currentArea =
newScaledDimensions.width * newScaledDimensions.height;
}
}

state.scaledBoundingBoxDimensions = newScaledDimensions;
const scaledDimensions = getScaledBoundingBoxDimensions(newDimensions);
state.scaledBoundingBoxDimensions = scaledDimensions;
}
},
setBoundingBoxCoordinates: (state, action: PayloadAction<Vector2d>) => {
@@ -381,9 +336,49 @@ export const canvasSlice = createSlice({

state.futureLayerStates = [];
state.shouldShowStagingOutline = true;
state.shouldShowStagingOutline = true;
},
addFillRect: (state) => {
const { boundingBoxCoordinates, boundingBoxDimensions, brushColor } =
state;

state.pastLayerStates.push(_.cloneDeep(state.layerState));

if (state.pastLayerStates.length > state.maxHistory) {
state.pastLayerStates.shift();
}

state.layerState.objects.push({
kind: 'fillRect',
layer: 'base',
...boundingBoxCoordinates,
...boundingBoxDimensions,
color: brushColor,
});

state.futureLayerStates = [];
},
addEraseRect: (state) => {
const { boundingBoxCoordinates, boundingBoxDimensions } = state;

state.pastLayerStates.push(_.cloneDeep(state.layerState));

if (state.pastLayerStates.length > state.maxHistory) {
state.pastLayerStates.shift();
}

state.layerState.objects.push({
kind: 'eraseRect',
layer: 'base',
...boundingBoxCoordinates,
...boundingBoxDimensions,
});

state.futureLayerStates = [];
},
addLine: (state, action: PayloadAction<number[]>) => {
const { tool, layer, brushColor, brushSize } = state;
const { tool, layer, brushColor, brushSize, shouldRestrictStrokesToBox } =
state;

if (tool === 'move' || tool === 'colorPicker') return;

@@ -393,20 +388,29 @@ export const canvasSlice = createSlice({
const newColor =
layer === 'base' && tool === 'brush' ? { color: brushColor } : {};

state.pastLayerStates.push(state.layerState);
state.pastLayerStates.push(_.cloneDeep(state.layerState));

if (state.pastLayerStates.length > state.maxHistory) {
state.pastLayerStates.shift();
}

state.layerState.objects.push({
const newLine: CanvasMaskLine | CanvasBaseLine = {
kind: 'line',
layer,
tool,
strokeWidth: newStrokeWidth,
points: action.payload,
...newColor,
});
};

if (shouldRestrictStrokesToBox) {
newLine.clip = {
...state.boundingBoxCoordinates,
...state.boundingBoxDimensions,
};
}

state.layerState.objects.push(newLine);

state.futureLayerStates = [];
},
@@ -422,7 +426,7 @@ export const canvasSlice = createSlice({

if (!targetState) return;

state.futureLayerStates.unshift(state.layerState);
state.futureLayerStates.unshift(_.cloneDeep(state.layerState));

if (state.futureLayerStates.length > state.maxHistory) {
state.futureLayerStates.pop();
@@ -435,7 +439,7 @@ export const canvasSlice = createSlice({

if (!targetState) return;

state.pastLayerStates.push(state.layerState);
state.pastLayerStates.push(_.cloneDeep(state.layerState));

if (state.pastLayerStates.length > state.maxHistory) {
state.pastLayerStates.shift();
@@ -459,7 +463,7 @@ export const canvasSlice = createSlice({
state.shouldShowIntermediates = action.payload;
},
resetCanvas: (state) => {
state.pastLayerStates.push(state.layerState);
state.pastLayerStates.push(_.cloneDeep(state.layerState));

state.layerState = initialLayerState;
state.futureLayerStates = [];
@@ -501,11 +505,21 @@ export const canvasSlice = createSlice({
newScale
);

const newBoundingBoxDimensions = { width: 512, height: 512 };

state.stageScale = newScale;
state.stageCoordinates = newCoordinates;
state.stageDimensions = newStageDimensions;
state.boundingBoxCoordinates = { x: 0, y: 0 };
state.boundingBoxDimensions = { width: 512, height: 512 };
state.boundingBoxDimensions = newBoundingBoxDimensions;

if (state.boundingBoxScaleMethod === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
newBoundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}

return;
}

@@ -568,11 +582,20 @@ export const canvasSlice = createSlice({
newScale
);

const newBoundingBoxDimensions = { width: 512, height: 512 };

state.stageScale = newScale;

state.stageCoordinates = newCoordinates;
state.boundingBoxCoordinates = { x: 0, y: 0 };
state.boundingBoxDimensions = { width: 512, height: 512 };
state.boundingBoxDimensions = newBoundingBoxDimensions;

if (state.boundingBoxScaleMethod === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
newBoundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}
}
},
resetCanvasView: (
@@ -628,10 +651,19 @@ export const canvasSlice = createSlice({
newScale
);

const newBoundingBoxDimensions = { width: 512, height: 512 };

state.stageScale = newScale;
state.stageCoordinates = newCoordinates;
state.boundingBoxCoordinates = { x: 0, y: 0 };
state.boundingBoxDimensions = { width: 512, height: 512 };
state.boundingBoxDimensions = newBoundingBoxDimensions;

if (state.boundingBoxScaleMethod === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
newBoundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}
}
},
nextStagingAreaImage: (state) => {
@@ -708,6 +740,13 @@ export const canvasSlice = createSlice({

state.boundingBoxDimensions = newBoundingBoxDimensions;
state.boundingBoxCoordinates = newBoundingBoxCoordinates;

if (state.boundingBoxScaleMethod === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
newBoundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}
}
},
setBoundingBoxScaleMethod: (
@@ -715,6 +754,13 @@ export const canvasSlice = createSlice({
action: PayloadAction<BoundingBoxScale>
) => {
state.boundingBoxScaleMethod = action.payload;

if (action.payload === 'auto') {
const scaledDimensions = getScaledBoundingBoxDimensions(
state.boundingBoxDimensions
);
state.scaledBoundingBoxDimensions = scaledDimensions;
}
},
setScaledBoundingBoxDimensions: (
state,
@@ -731,6 +777,9 @@ export const canvasSlice = createSlice({
setShouldShowCanvasDebugInfo: (state, action: PayloadAction<boolean>) => {
state.shouldShowCanvasDebugInfo = action.payload;
},
setShouldRestrictStrokesToBox: (state, action: PayloadAction<boolean>) => {
state.shouldRestrictStrokesToBox = action.payload;
},
setShouldCropToBoundingBoxOnSave: (
state,
action: PayloadAction<boolean>
@@ -748,9 +797,7 @@ export const canvasSlice = createSlice({
state.tool = 'brush';
},
setMergedCanvas: (state, action: PayloadAction<CanvasImage>) => {
state.pastLayerStates.push({
...state.layerState,
});
state.pastLayerStates.push(_.cloneDeep(state.layerState));

state.futureLayerStates = [];

@@ -766,10 +813,19 @@ export const canvasSlice = createSlice({
state.isMovingStage = false;
state.isTransformingBoundingBox = false;
},
mouseLeftCanvas: (state) => {
state.cursorPosition = null;
state.isDrawing = false;
state.isMouseOverBoundingBox = false;
state.isMovingBoundingBox = false;
state.isTransformingBoundingBox = false;
},
},
});

export const {
addEraseRect,
addFillRect,
addImageToStagingArea,
addLine,
addPointToCurrentLine,
@@ -779,6 +835,7 @@ export const {
commitStagingAreaImage,
discardStagedImages,
fitBoundingBoxToStage,
mouseLeftCanvas,
nextStagingAreaImage,
prevStagingAreaImage,
redo,
@@ -827,13 +884,13 @@ export const {
setShouldSnapToGrid,
setShouldUseInpaintReplace,
setStageCoordinates,
setStageDimensions,
setStageScale,
setTool,
toggleShouldLockBoundingBox,
toggleTool,
undo,
setScaledBoundingBoxDimensions,
setShouldRestrictStrokesToBox,
} = canvasSlice.actions;

export default canvasSlice.reducer;

@@ -1,5 +1,5 @@
import * as InvokeAI from 'app/invokeai';
import { Vector2d } from 'konva/lib/types';
import { IRect, Vector2d } from 'konva/lib/types';
import { RgbaColor } from 'react-colorful';

export const LAYER_NAMES_DICT = [
@@ -30,13 +30,6 @@ export type Dimensions = {
height: number;
};

export type CanvasAnyLine = {
kind: 'line';
tool: CanvasDrawingTool;
strokeWidth: number;
points: number[];
};

export type CanvasImage = {
kind: 'image';
layer: 'base';
@@ -47,24 +40,54 @@ export type CanvasImage = {
image: InvokeAI.Image;
};

export type CanvasMaskLine = CanvasAnyLine & {
export type CanvasMaskLine = {
layer: 'mask';
kind: 'line';
tool: CanvasDrawingTool;
strokeWidth: number;
points: number[];
clip?: IRect;
};

export type CanvasLine = CanvasAnyLine & {
export type CanvasBaseLine = {
layer: 'base';
color?: RgbaColor;
kind: 'line';
tool: CanvasDrawingTool;
strokeWidth: number;
points: number[];
clip?: IRect;
};

export type CanvasObject = CanvasImage | CanvasLine | CanvasMaskLine;
export type CanvasFillRect = {
kind: 'fillRect';
layer: 'base';
x: number;
y: number;
width: number;
height: number;
color: RgbaColor;
};

export type CanvasEraseRect = {
kind: 'eraseRect';
layer: 'base';
x: number;
y: number;
width: number;
height: number;
};

export type CanvasObject =
| CanvasImage
| CanvasBaseLine
| CanvasMaskLine
| CanvasFillRect
| CanvasEraseRect;

export type CanvasLayerState = {
objects: CanvasObject[];
stagingArea: {
x: number;
y: number;
width: number;
height: number;
images: CanvasImage[];
selectedImageIndex: number;
};
@@ -74,15 +97,21 @@ export type CanvasLayerState = {
export const isCanvasMaskLine = (obj: CanvasObject): obj is CanvasMaskLine =>
obj.kind === 'line' && obj.layer === 'mask';

export const isCanvasBaseLine = (obj: CanvasObject): obj is CanvasLine =>
export const isCanvasBaseLine = (obj: CanvasObject): obj is CanvasBaseLine =>
obj.kind === 'line' && obj.layer === 'base';

export const isCanvasBaseImage = (obj: CanvasObject): obj is CanvasImage =>
obj.kind === 'image' && obj.layer === 'base';

export const isCanvasFillRect = (obj: CanvasObject): obj is CanvasFillRect =>
obj.kind === 'fillRect' && obj.layer === 'base';

export const isCanvasEraseRect = (obj: CanvasObject): obj is CanvasEraseRect =>
obj.kind === 'eraseRect' && obj.layer === 'base';

export const isCanvasAnyLine = (
obj: CanvasObject
): obj is CanvasMaskLine | CanvasLine => obj.kind === 'line';
): obj is CanvasMaskLine | CanvasBaseLine => obj.kind === 'line';

export interface CanvasState {
boundingBoxCoordinates: Vector2d;
@@ -119,6 +148,7 @@ export interface CanvasState {
shouldDarkenOutsideBoundingBox: boolean;
shouldLockBoundingBox: boolean;
shouldPreserveMaskedArea: boolean;
shouldRestrictStrokesToBox: boolean;
shouldShowBoundingBox: boolean;
shouldShowBrush: boolean;
shouldShowBrushPreview: boolean;

@@ -0,0 +1,39 @@
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { Dimensions } from '../store/canvasTypes';

const getScaledBoundingBoxDimensions = (dimensions: Dimensions) => {
const { width, height } = dimensions;

const scaledDimensions = { width, height };
const targetArea = 512 * 512;
const aspectRatio = width / height;
let currentArea = width * height;
let maxDimension = 448;
while (currentArea < targetArea) {
maxDimension += 64;
if (width === height) {
scaledDimensions.width = 512;
scaledDimensions.height = 512;
break;
} else {
if (aspectRatio > 1) {
scaledDimensions.width = maxDimension;
scaledDimensions.height = roundToMultiple(
maxDimension / aspectRatio,
64
);
} else if (aspectRatio < 1) {
scaledDimensions.height = maxDimension;
scaledDimensions.width = roundToMultiple(
maxDimension * aspectRatio,
64
);
}
currentArea = scaledDimensions.width * scaledDimensions.height;
}
}

return scaledDimensions;
};

export default getScaledBoundingBoxDimensions;
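For readers skimming the new helper above: the loop grows the box until its area reaches roughly 512x512 while keeping the aspect ratio and snapping both sides to multiples of 64. A standalone Python re-expression with a worked call; it assumes roundToMultiple (whose source is not shown in this diff) rounds to the nearest multiple:

def round_to_multiple(value: float, multiple: int) -> int:
    # assumption: mirrors the TS helper by rounding to the nearest multiple
    return round(value / multiple) * multiple

def scaled_bounding_box(width: int, height: int) -> tuple[int, int]:
    # re-expression of getScaledBoundingBoxDimensions above
    target_area = 512 * 512
    aspect = width / height
    w, h = width, height
    area = w * h
    max_dim = 448
    while area < target_area:
        max_dim += 64
        if width == height:
            return 512, 512
        if aspect > 1:
            w, h = max_dim, round_to_multiple(max_dim / aspect, 64)
        elif aspect < 1:
            h, w = max_dim, round_to_multiple(max_dim * aspect, 64)
        area = w * h
    return w, h

print(scaled_bounding_box(384, 256))  # (640, 448): first size whose area reaches 512*512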
@@ -27,6 +27,7 @@ import {
FaCode,
FaCopy,
FaDownload,
FaExpand,
FaExpandArrowsAlt,
FaGrinStars,
FaQuoteRight,
@@ -357,6 +358,10 @@ const CurrentImageButtons = () => {
[currentImage, shouldShowImageDetails]
);

const handleLightBox = () => {
dispatch(setIsLightBoxOpen(!isLightBoxOpen));
};

return (
<div className="current-image-options">
<ButtonGroup isAttached={true}>
@@ -396,29 +401,38 @@ const CurrentImageButtons = () => {
</IAIButton>
</div>
</IAIPopover>
<IAIIconButton
icon={<FaExpand />}
tooltip={!isLightBoxOpen ? 'Open In Viewer (Z)' : 'Close Viewer (Z)'}
aria-label={
!isLightBoxOpen ? 'Open In Viewer (Z)' : 'Close Viewer (Z)'
}
data-selected={isLightBoxOpen}
onClick={handleLightBox}
/>
</ButtonGroup>

<ButtonGroup isAttached={true}>
<IAIIconButton
icon={<FaQuoteRight />}
tooltip="Use Prompt"
aria-label="Use Prompt"
tooltip="Use Prompt (P)"
aria-label="Use Prompt (P)"
isDisabled={!currentImage?.metadata?.image?.prompt}
onClick={handleClickUsePrompt}
/>

<IAIIconButton
icon={<FaSeedling />}
tooltip="Use Seed"
aria-label="Use Seed"
tooltip="Use Seed (S)"
aria-label="Use Seed (S)"
isDisabled={!currentImage?.metadata?.image?.seed}
onClick={handleClickUseSeed}
/>

<IAIIconButton
icon={<FaAsterisk />}
tooltip="Use All"
aria-label="Use All"
tooltip="Use All (A)"
aria-label="Use All (A)"
isDisabled={
!['txt2img', 'img2img'].includes(
currentImage?.metadata?.image?.type
@@ -474,13 +488,15 @@ const CurrentImageButtons = () => {
</IAIPopover>
</ButtonGroup>

<IAIIconButton
icon={<FaCode />}
tooltip="Details"
aria-label="Details"
data-selected={shouldShowImageDetails}
onClick={handleClickShowImageDetails}
/>
<ButtonGroup isAttached={true}>
<IAIIconButton
icon={<FaCode />}
tooltip="Info (I)"
aria-label="Info (I)"
data-selected={shouldShowImageDetails}
onClick={handleClickShowImageDetails}
/>
</ButtonGroup>

<DeleteImageModal image={currentImage}>
<IAIIconButton

@@ -24,7 +24,6 @@
max-height: 100%;
height: auto;
position: absolute;
cursor: pointer;
}
}

@@ -10,10 +10,7 @@ import {
} from 'features/gallery/store/gallerySlice';
import { createSelector } from '@reduxjs/toolkit';
import _ from 'lodash';
import {
OptionsState,
setIsLightBoxOpen,
} from 'features/options/store/optionsSlice';
import { OptionsState } from 'features/options/store/optionsSlice';
import ImageMetadataViewer from './ImageMetaDataViewer/ImageMetadataViewer';

export const imagesSelector = createSelector(
@@ -82,10 +79,6 @@ export default function CurrentImagePreview() {
dispatch(selectNextImage());
};

const handleLightBox = () => {
dispatch(setIsLightBoxOpen(true));
};

return (
<div className={'current-image-preview'}>
{imageToDisplay && (
@@ -93,7 +86,6 @@ export default function CurrentImagePreview() {
src={imageToDisplay.url}
width={imageToDisplay.width}
height={imageToDisplay.height}
onClick={handleLightBox}
style={{
imageRendering: isIntermediate ? 'pixelated' : 'initial',
}}

@@ -116,7 +116,8 @@ const DeleteImageModal = forwardRef(
<AlertDialogBody>
<Flex direction={'column'} gap={5}>
<Text>
Are you sure? You can't undo this action afterwards.
Are you sure? Deleted images will be sent to the Bin. You
can restore from there if you wish to.
</Text>
<FormControl>
<Flex alignItems={'center'}>

@@ -159,6 +159,11 @@ const HoverableImage = memo((props: HoverableImageProps) => {
e.dataTransfer.effectAllowed = 'move';
};

const handleLightBox = () => {
dispatch(setIsLightBoxOpen(true));
dispatch(setCurrentImage(image));
};

return (
<ContextMenu.Root
onOpenChange={(open: boolean) => {
@@ -220,6 +225,9 @@ const HoverableImage = memo((props: HoverableImageProps) => {
e.detail.originalEvent.preventDefault();
}}
>
<ContextMenu.Item onClickCapture={handleLightBox}>
Open In Viewer
</ContextMenu.Item>
<ContextMenu.Item
onClickCapture={handleUsePrompt}
disabled={image?.metadata?.image?.prompt === undefined}

@@ -166,7 +166,7 @@ export default function ImageGallery() {
dispatch(selectPrevImage());
},
{
enabled: !isStaging,
enabled: !isStaging || activeTabName !== 'unifiedCanvas',
},
[isStaging]
);
@@ -177,7 +177,7 @@ export default function ImageGallery() {
dispatch(selectNextImage());
},
{
enabled: !isStaging,
enabled: !isStaging || activeTabName !== 'unifiedCanvas',
},
[isStaging]
);
@@ -10,6 +10,7 @@
top: 0;
background-color: var(--background-color-secondary);
z-index: 30;
animation: popIn 0.3s ease-in;

.image-gallery-wrapper {
max-height: 100% !important;
@@ -75,3 +76,14 @@
row-gap: 0.5rem;
}
}

@keyframes popIn {
from {
opacity: 0;
filter: blur(100);
}
to {
opacity: 1;
filter: blur(0);
}
}

@@ -47,7 +47,12 @@ export default function ReactPanZoom({
};

return (
<TransformWrapper centerOnInit minScale={0.1}>
<TransformWrapper
centerOnInit
minScale={0.1}
initialPositionX={50}
initialPositionY={50}
>
{({ zoomIn, zoomOut, resetTransform, centerView }) => (
<>
<div className="lightbox-image-options">
@@ -103,8 +108,12 @@ export default function ReactPanZoom({
fontSize={20}
/>
</div>

<TransformComponent wrapperStyle={{ width: '100%', height: '100%' }}>
<TransformComponent
wrapperStyle={{
width: '100%',
height: '100%',
}}
>
<img
style={{
transform: `rotate(${rotation * 90}deg) scaleX(${
@@ -116,7 +125,7 @@ export default function ReactPanZoom({
alt={alt}
ref={ref}
className={styleClass ? styleClass : ''}
onLoad={() => centerView()}
onLoad={() => centerView(1, 0, 'easeOut')}
/>
</TransformComponent>
</>

@@ -5,7 +5,6 @@ import IAISelect from 'common/components/IAISelect';
import IAISlider from 'common/components/IAISlider';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import {
setBoundingBoxDimensions,
setBoundingBoxScaleMethod,
setScaledBoundingBoxDimensions,
} from 'features/canvas/store/canvasSlice';
@@ -31,13 +30,11 @@ const selector = createSelector(
const { infill_methods: availableInfillMethods } = system;

const {
boundingBoxDimensions,
boundingBoxScaleMethod: boundingBoxScale,
scaledBoundingBoxDimensions,
} = canvas;

return {
boundingBoxDimensions,
boundingBoxScale,
scaledBoundingBoxDimensions,
tileSize,
@@ -58,7 +55,6 @@ const InfillAndScalingOptions = () => {
const {
tileSize,
infillMethod,
boundingBoxDimensions,
availableInfillMethods,
boundingBoxScale,
isManual,
@@ -105,7 +101,6 @@ const InfillAndScalingOptions = () => {
e: ChangeEvent<HTMLSelectElement>
) => {
dispatch(setBoundingBoxScaleMethod(e.target.value as BoundingBoxScale));
dispatch(setBoundingBoxDimensions(boundingBoxDimensions));
};

return (

@@ -72,8 +72,8 @@ const SeamCorrectionOptions = () => {
<IAISlider
sliderMarkRightOffset={-2}
label={'Seam Strength'}
min={0}
max={1}
min={0.01}
max={0.99}
step={0.01}
value={seamStrength}
onChange={(v) => {

@@ -19,7 +19,7 @@ export default function ImageToImageStrength(props: ImageToImageStrengthProps) {
const handleChangeStrength = (v: number) => dispatch(setImg2imgStrength(v));

const handleImg2ImgStrengthReset = () => {
dispatch(setImg2imgStrength(0.5));
dispatch(setImg2imgStrength(0.75));
};

return (
@@ -38,16 +38,5 @@ export default function ImageToImageStrength(props: ImageToImageStrengthProps) {
inputWidth={'5.5rem'}
handleReset={handleImg2ImgStrengthReset}
/>
// <IAINumberInput
// label={label}
// step={0.01}
// min={0.01}
// max={0.99}
// onChange={handleChangeStrength}
// value={img2imgStrength}
// width="100%"
// isInteger={false}
// styleClass={styleClass}
// />
);
}

@@ -1,7 +1,8 @@
import { emptyTempFolder } from 'app/socketio/actions';
import { useAppDispatch } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/store';
import IAIAlertDialog from 'common/components/IAIAlertDialog';
import IAIButton from 'common/components/IAIButton';
import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
import {
clearCanvasHistory,
resetCanvas,
@@ -9,6 +10,7 @@ import {
import { FaTrash } from 'react-icons/fa';

const EmptyTempFolderButtonModal = () => {
const isStaging = useAppSelector(isStagingSelector);
const dispatch = useAppDispatch();

const acceptCallback = () => {
@@ -23,7 +25,7 @@ const EmptyTempFolderButtonModal = () => {
acceptCallback={acceptCallback}
acceptButtonText={'Empty Folder'}
triggerComponent={
<IAIButton leftIcon={<FaTrash />} size={'sm'}>
<IAIButton leftIcon={<FaTrash />} size={'sm'} isDisabled={isStaging}>
Empty Temp Image Folder
</IAIButton>
}

@@ -173,6 +173,16 @@ export default function HotkeysModal({ children }: HotkeysModalProps) {
desc: 'Allows canvas navigation',
hotkey: 'V',
},
{
title: 'Fill Bounding Box',
desc: 'Fills the bounding box with brush color',
hotkey: 'Shift + F',
},
{
title: 'Erase Bounding Box',
desc: 'Erases the bounding box area',
hotkey: 'Delete / Backspace',
},
{
title: 'Select Color Picker',
desc: 'Selects the canvas color picker',

@@ -120,7 +120,7 @@ gr = Generate(
safety_checker:bool = activate safety checker [False]

# this value is sticky and maintained between generation calls
sampler_name:str = ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'] // k_lms
sampler_name:str = ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_dpmpp_2', 'k_dpmpp_2_a', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'] // k_lms

# these are deprecated - use conf and model instead
weights = path to model weights ('models/ldm/stable-diffusion-v1/model.ckpt')
@@ -974,6 +974,10 @@ class Generate:
self.sampler = KSampler(self.model, 'dpm_2_ancestral', device=self.device)
elif self.sampler_name == 'k_dpm_2':
self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
elif self.sampler_name == 'k_dpmpp_2_a':
self.sampler = KSampler(self.model, 'dpmpp_2s_ancestral', device=self.device)
elif self.sampler_name == 'k_dpmpp_2':
self.sampler = KSampler(self.model, 'dpmpp_2m', device=self.device)
elif self.sampler_name == 'k_euler_a':
self.sampler = KSampler(self.model, 'euler_ancestral', device=self.device)
elif self.sampler_name == 'k_euler':
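The two new names map onto the k-diffusion samplers 'dpmpp_2s_ancestral' (DPM++ 2S ancestral) and 'dpmpp_2m' (DPM++ 2M). A minimal usage sketch, assuming the Generate API shown in the docstring above; the prompt text is a placeholder, not a value taken from this diff:

from ldm.generate import Generate

# sampler_name is the sticky setting listed in the Generate docstring above;
# 'k_dpmpp_2' selects the DPM++ 2M sampler added in this change
gr = Generate(sampler_name='k_dpmpp_2')
results = gr.prompt2image(prompt='a placeholder prompt', steps=20)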
@@ -101,6 +101,8 @@ SAMPLER_CHOICES = [
'ddim',
'k_dpm_2_a',
'k_dpm_2',
'k_dpmpp_2_a',
'k_dpmpp_2',
'k_euler_a',
'k_euler',
'k_heun',
@@ -461,9 +463,12 @@ class Args(object):
default='auto',
)
model_group.add_argument(
'--nsfw_checker'
'--safety_checker',
action='store_true',
help='Check for and blur potentially NSFW images',
action=argparse.BooleanOptionalAction,
dest='safety_checker',
default=False,
help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.',
)
model_group.add_argument(
'--patchmatch',
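The replacement of action='store_true' with argparse.BooleanOptionalAction (available in Python 3.9+) is what generates the paired --no-... switches mentioned in the new help text. A minimal standalone sketch, independent of the Args class above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--nsfw_checker', '--safety_checker',
    action=argparse.BooleanOptionalAction,  # auto-creates --no-nsfw_checker etc.
    dest='safety_checker',
    default=False,
)

print(parser.parse_args(['--nsfw_checker']).safety_checker)     # True
print(parser.parse_args(['--no-nsfw_checker']).safety_checker)  # False
print(parser.parse_args([]).safety_checker)                     # False (the default)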
@@ -36,7 +36,7 @@ class Concepts(object):
models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/'))
self.concept_list = [a.id.split('/')[1] for a in models]
except Exception as e:
print(' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.')
print(f' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.')
print(' ** You may load .bin and .pt file(s) manually using the --embedding_directory argument.')
return self.concept_list

@@ -6,6 +6,7 @@ import torch
import numpy as np
import random
import os
import os.path as osp
import traceback
from tqdm import tqdm, trange
from PIL import Image, ImageFilter, ImageChops
@@ -32,6 +33,7 @@ class Generator():
self.with_variations = []
self.use_mps_noise = False
self.free_gpu_mem = None
self.caution_img = None

# this is going to be overridden in img2img.py, txt2img.py and inpaint.py
def get_make_image(self,prompt,**kwargs):
@@ -290,13 +292,29 @@ class Generator():
def blur(self,input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
caution = Image.open(CAUTION_IMG)
caution = caution.resize((caution.width // 2, caution.height //2))
blurry.paste(caution,(0,0),caution)
caution = self.get_caution_img()
if caution:
blurry.paste(caution,(0,0),caution)
except FileNotFoundError:
pass
return blurry

def get_caution_img(self):
if self.caution_img:
return self.caution_img
# Find the caution image. If we are installed in the package directory it will
# be six levels up. If we are in the repo directory it will be three levels up.
for dots in ('../../..','../../../../../..'):
caution_path = osp.join(osp.dirname(__file__),dots,CAUTION_IMG)
if osp.exists(caution_path):
path = caution_path
break
if not path:
return
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height //2))
return self.caution_img

# this is a handy routine for debugging use. Given a generated sample,
# convert it into a PNG image and store it at the indicated path
def save_sample(self, sample, filepath):
@@ -27,7 +27,7 @@ if Globals.try_patchmatch:
print('>> Patchmatch initialized')
infill_methods.append('patchmatch')
else:
print('>> Patchmatch not loaded, please see https://github.com/invoke-ai/InvokeAI/blob/patchmatch-install-docs/docs/installation/INSTALL_PATCHMATCH.md')
print('>> Patchmatch not loaded (nonfatal)')
else:
print('>> Patchmatch loading disabled')

@@ -333,6 +333,9 @@ class Inpaint(Img2Img):
mask_blur_radius, seam_size, seam_blur, seam_strength,
seam_steps, tile_size, step_callback,
inpaint_replace, enable_image_debugging,
inpaint_width = inpaint_width,
inpaint_height = inpaint_height,
infill_method = infill_method,
**kwargs)

return result

@@ -12,6 +12,8 @@ from ldm.invoke.generator.txt2img import Txt2Img
class Omnibus(Img2Img,Txt2Img):
def __init__(self, model, precision):
super().__init__(model, precision)
self.pil_mask = None
self.pil_image = None

def get_make_image(
self,
@@ -101,7 +101,8 @@ class Completer(object):
self.linebuffer = None
self.auto_history_active = True
self.extensions = None
self.concepts = Concepts().list_concepts()
self.concepts = None
self.embedding_terms = set()
return

def complete(self, text, state):
@@ -270,16 +271,21 @@ class Completer(object):
return matches

def add_embedding_terms(self, terms:list[str]):
self.concepts = Concepts().list_concepts()
self.concepts.extend(terms)
self.embedding_terms = set(terms)
if self.concepts:
self.embedding_terms.update(self.concepts)

def _concept_completions(self, text, state):
if self.concepts is None:
self.concepts = set(Concepts().list_concepts())
self.embedding_terms.update(self.concepts)

partial = text[1:] # this removes the leading '<'
if len(partial) == 0:
return self.concepts # whole dump - think if user wants this!
return list(self.embedding_terms) # whole dump - think if user wants this!

matches = list()
for concept in self.concepts:
for concept in self.embedding_terms:
if concept.startswith(partial):
matches.append(f'<{concept}>')
matches.sort()
@@ -416,7 +422,11 @@ def get_completer(opt:Args, models=[])->Completer:
readline.parse_and_bind('set skip-completed-text on')
readline.parse_and_bind('set show-all-if-ambiguous on')

histfile = os.path.join(os.path.expanduser(opt.outdir), '.invoke_history')
outdir = os.path.expanduser(opt.outdir)
if os.path.isabs(outdir):
histfile = os.path.join(outdir,'.invoke_history')
else:
histfile = os.path.join(Globals.root, outdir, '.invoke_history')
try:
readline.read_history_file(histfile)
readline.set_history_length(1000)
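The new histfile lookup anchors a relative --outdir under Globals.root instead of the process working directory. A small worked sketch of the same branch; the paths here are hypothetical stand-ins:

import os

root = '/home/user/invokeai'  # stand-in for Globals.root
for outdir in ('/tmp/outputs', 'outputs/img-samples'):
    out = os.path.expanduser(outdir)
    if os.path.isabs(out):
        histfile = os.path.join(out, '.invoke_history')
    else:
        histfile = os.path.join(root, out, '.invoke_history')
    print(histfile)
# /tmp/outputs/.invoke_history
# /home/user/invokeai/outputs/img-samples/.invoke_history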
@@ -1490,7 +1490,7 @@ class LatentDiffusion(DDPM):
)
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

logvar_t = self.logvar[t].to(self.device)
logvar_t = self.logvar[t.item()].to(self.device)
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
if self.learn_logvar:
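The one-line change swaps a tensor index for a plain Python int. A minimal sketch of the difference; the sizes are illustrative, and the likely motivation, avoiding an index lookup between tensors on different devices, is an inference on this editor's part, since t.item() also presumes a single timestep per call:

import torch

logvar = torch.zeros(1000)        # per-timestep log-variance table on the CPU
t = torch.randint(0, 1000, (1,))  # during training this tensor may live on cuda/mps

logvar_t = logvar[t.item()]       # indexing with a plain int needs no device match
print(logvar_t)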
@@ -193,7 +193,7 @@ def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
print('Path already exists. Rename it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.replace(path, new_name)
os.makedirs(path)

@@ -74,8 +74,8 @@
"#@title 3. Install dependencies\n",
"import gc\n",
"\n",
"!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements.txt\n",
"!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements-lin-win-colab-CUDA.txt\n",
"!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements-base.txt\n",
"!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements-win-colab-cuda.txt\n",
"!pip install colab-xterm\n",
"!pip install -r requirements-lin-win-colab-CUDA.txt\n",
"!pip install clean-fid torchtext\n",
@@ -262,17 +262,17 @@
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3.10.4 64-bit",
"display_name": "Python 3.9.12 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.10.4"
"version": "3.9.12"
},
"vscode": {
"interpreter": {
"hash": "3ad933181bd8a04b432d3370b9dc3b0662ad032c4dfaa4e4f1596c548f763858"
"hash": "4e870c5c5fe42db7e2c5647ae5af656ff3391bf8c2b729cbf7fa0e16ca8cb5af"
}
}
},

scripts/configure_invokeai.py (Normal file → Executable file)
@@ -70,10 +70,10 @@ Web version:
Command-line version:
python scripts/invoke.py

Remember to activate that 'invokeai' environment before running invoke.py.

Or, if you used one of the automated installers, execute "invoke.sh" (Linux/Mac)
or "invoke.bat" (Windows) to start the script.
If you installed manually, remember to activate the 'invokeai'
environment before running invoke.py. If you installed using the
automated installation script, execute "invoke.sh" (Linux/Mac) or
"invoke.bat" (Windows) to start InvokeAI.

Have fun!
'''
@@ -108,11 +108,13 @@ completely skip this step.
completer.complete_extensions(None) # turn off path-completion mode
selection = None
while selection is None:
choice = input('Download <r>ecommended models, <c>ustomize the list, or <s>kip this step? [r]: ')
choice = input('Download <r>ecommended models, <a>ll models, <c>ustomized list, or <s>kip this step? [r]: ')
if choice.startswith(('r','R')) or len(choice)==0:
selection = 'recommended'
elif choice.startswith(('c','C')):
selection = 'customized'
elif choice.startswith(('a','A')):
selection = 'all'
elif choice.startswith(('s','S')):
selection = 'skip'
return selection
@@ -166,7 +168,14 @@ def recommended_datasets()->dict:
if Datasets[ds]['recommended']:
datasets[ds]=True
return datasets

#---------------------------------------------
def all_datasets()->dict:
datasets = dict()
for ds in Datasets.keys():
datasets[ds]=True
return datasets

#-------------------------------Authenticate against Hugging Face
def authenticate():
print('''
@@ -217,7 +226,9 @@ This involves a few easy steps.
(You can enter anything you like in the token creation field marked "Name".
"Role" should be "read").

Now copy the token to your clipboard and paste it here: '''
Now copy the token to your clipboard and paste it at the prompt. Windows
users can paste with right-click.
Token: '''
)
access_token = getpass_asterisk.getpass_asterisk()
return access_token
@@ -243,10 +254,10 @@ def download_weight_datasets(models:dict, access_token:str):
for mod in models.keys():
repo_id = Datasets[mod]['repo_id']
filename = Datasets[mod]['file']
print(os.path.join(Globals.root,Model_dir,Weights_dir), file=sys.stderr)
dest = os.path.join(Globals.root,Model_dir,Weights_dir)
success = hf_download_with_resume(
repo_id=repo_id,
model_dir=os.path.join(Globals.root,Model_dir,Weights_dir),
model_dir=dest,
model_name=filename,
access_token=access_token
)
@@ -494,12 +505,12 @@ def download_clipseg():

#-------------------------------------
def download_safety_checker():
print('Installing safety model for NSFW content detection...',file=sys.stderr)
print('Installing model for NSFW content detection...',file=sys.stderr)
try:
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
except ModuleNotFoundError:
print('Error installing safety checker model:')
print('Error installing NSFW checker model:')
print(traceback.format_exc())
return
safety_model_id = "CompVis/stable-diffusion-safety-checker"
@@ -520,11 +531,14 @@ def download_weights(opt:dict):
return
else:
print('** Cannot download models because no Hugging Face access token could be found. Please re-run without --yes')
return
else:
choice = user_wants_to_download_weights()

if choice == 'recommended':
models = recommended_datasets()
elif choice == 'all':
models = all_datasets()
elif choice == 'customized':
models = select_datasets(choice)
if models is None and yes_or_no('Quit?',default_yes=False):
@@ -570,7 +584,8 @@ def select_root(root:str, yes_to_all:bool=False):
completer.set_default_dir(default)
completer.complete_extensions(())
completer.set_line(default)
return input(f"Select a directory in which to install InvokeAI's models and configuration files [{default}]: ") or default
directory = input(f"Select a directory in which to install InvokeAI's models and configuration files [{default}]: ").strip(' \\')
return directory or default

#-------------------------------------
def select_outputs(root:str,yes_to_all:bool=False):
@@ -580,11 +595,12 @@ def select_outputs(root:str,yes_to_all:bool=False):
completer.set_default_dir(os.path.expanduser('~'))
completer.complete_extensions(())
completer.set_line(default)
return input(f'Select the default directory for image outputs [{default}]: ') or default
directory = input(f'Select the default directory for image outputs [{default}]: ').strip(' \\')
return directory or default

#-------------------------------------
def initialize_rootdir(root:str,yes_to_all:bool=False):
assert os.path.exists('./configs'),'Run this script from within the top level of the InvokeAI source code directory, "InvokeAI"'
assert os.path.exists('./configs'),'Run this script from within the InvokeAI source code directory, "InvokeAI" or the runtime directory "invokeai".'

print(f'** INITIALIZING INVOKEAI RUNTIME DIRECTORY **')
root_selected = False
@@ -603,19 +619,50 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
print(f'\nYou may change the chosen directories at any time by editing the --root and --outdir options in "{Globals.initfile}",')
print(f'You may also change the runtime directory by setting the environment variable INVOKEAI_ROOT.\n')

enable_safety_checker = True
default_sampler = 'k_heun'
default_steps = '20' # deliberately a string - see test below

sampler_choices =['ddim','k_dpm_2_a','k_dpm_2','k_euler_a','k_euler','k_heun','k_lms','plms']

if not yes_to_all:
print('The NSFW (not safe for work) checker blurs out images that potentially contain sexual imagery.')
print('It can be selectively enabled at run time with --nsfw_checker, and disabled with --no-nsfw_checker.')
print('The following option will set whether the checker is enabled by default. Like other options, you can')
print(f'change this setting later by editing the file {Globals.initfile}.')
enable_safety_checker = yes_or_no('Enable the NSFW checker by default?',enable_safety_checker)

print('\nThe next choice selects the sampler to use by default. Samplers have different speed/performance')
print('tradeoffs. If you are not sure what to select, accept the default.')
sampler = None
while sampler not in sampler_choices:
sampler = input(f'Default sampler to use? ({", ".join(sampler_choices)}) [{default_sampler}]:') or default_sampler

print('\nThe number of denoising steps affects both the speed and quality of the images generated.')
print('Higher steps often (but not always) increases the quality of the image, but increases image')
print('generation time. This can be changed at run time. Accept the default if you are unsure.')
steps = ''
while not steps.isnumeric():
steps = input(f'Default number of steps to use during generation? [{default_steps}]:') or default_steps
else:
sampler = default_sampler
steps = default_steps

safety_checker = '--nsfw_checker' if enable_safety_checker else '--no-nsfw_checker'

for name in ('models','configs','embeddings'):
os.makedirs(os.path.join(root,name), exist_ok=True)
for src in (['configs']):
dest = os.path.join(root,src)
if not os.path.samefile(src,dest):
shutil.copytree(src,dest,dirs_exist_ok=True)
os.makedirs(outputs, exist_ok=True)
os.makedirs(outputs, exist_ok=True)

init_file = os.path.expanduser(Globals.initfile)
if not os.path.exists(init_file):
print(f'Creating the initialization file at "{init_file}".\n')
with open(init_file,'w') as f:
f.write(f'''# InvokeAI initialization file

print(f'Creating the initialization file at "{init_file}".\n')
with open(init_file,'w') as f:
f.write(f'''# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
# or renaming it and then running configure_invokeai.py again.
@@ -626,23 +673,18 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
# the --outdir option controls the default location of image files.
--outdir="{outputs}"

# generation arguments
{safety_checker}
--sampler={sampler}
--steps={steps}

# You may place other frequently-used startup commands here, one or more per line.
# Examples:
# --web --host=0.0.0.0
# --steps=20
# -Ak_euler_a -C10.0
#
'''
)
else:
print(f'Updating the initialization file at "{init_file}".\n')
with open(init_file,'r') as infile, open(f'{init_file}.tmp','w') as outfile:
for line in infile.readlines():
if not line.startswith('--root') and not line.startswith('--outdir'):
outfile.write(line)
outfile.write(f'--root="{root}"\n')
outfile.write(f'--outdir="{outputs}"\n')
os.replace(f'{init_file}.tmp',init_file)
''')

#-------------------------------------
class ProgressBar():

@@ -385,6 +385,8 @@ SAMPLER_CHOICES = [
'ddim',
'k_dpm_2_a',
'k_dpm_2',
'k_dpmpp_2_a',
'k_dpmpp_2',
'k_euler_a',
'k_euler',
'k_heun',
setup.py
@@ -81,6 +81,7 @@ setup(
'scripts/preload_models.py', 'scripts/images2prompt.py','scripts/merge_embeddings.py'
],
data_files=[('frontend/dist',list_files('frontend/dist')),
('frontend/dist/assets',list_files('frontend/dist/assets'))
('frontend/dist/assets',list_files('frontend/dist/assets')),
('assets',['assets/caution.png']),
],
)

source_installer/WinLongPathsEnabled.reg (new binary file, not shown)
@@ -5,7 +5,8 @@ cd "$(dirname "${BASH_SOURCE[0]}")"

# make the installer zip for linux and mac
rm -rf invokeAI
mkdir -p invokeAI
cp install.sh invokeAI
cp install.sh.in invokeAI/install.sh
chmod a+x invokeAI/install.sh
cp readme.txt invokeAI

zip -r invokeAI-src-installer-linux.zip invokeAI
@@ -14,10 +15,11 @@ zip -r invokeAI-src-installer-mac.zip invokeAI
# make the installer zip for windows
rm -rf invokeAI
mkdir -p invokeAI
cp install.bat invokeAI
cp install.bat.in invokeAI/install.bat
cp readme.txt invokeAI
cp WinLongPathsEnabled.reg invokeAI

zip -r invokeAI-src-installer-windows.zip invokeAI

rm -rf invokeAI
echo "The installer zips are ready to be distributed.."

@@ -5,12 +5,17 @@
@rem For users who already have git and conda, this step will be skipped.

@rem Next, it'll checkout the project's git repo, if necessary.
@rem Finally, it'll create the conda environment and preload the models.
@rem Finally, it'll create the conda environment and configure InvokeAI.

@rem This enables a user to install this project without manually installing conda and git.

echo "Installing InvokeAI.."
echo.
echo "InvokeAI source installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
echo "<Press any key to start the install process>"
pause
echo ""

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
@@ -84,23 +89,23 @@ copy environments-and-requirements\environment-win-cuda.yml environment.yml
call conda env create
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "Something went wrong while installing Python libraries and cannot continue.
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods."
echo "Press any key to continue"
echo "Something went wrong while installing Python libraries and cannot continue."
echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods"
pause
exit /b
)

copy source_installer\invoke.bat invoke.bat
copy source_installer\update.bat update.bat
copy source_installer\invoke.bat.in .\invoke.bat
copy source_installer\update.bat.in .\update.bat

call conda activate invokeai
@rem preload the models
call python scripts\preload_models.py
@rem call configure script
call python scripts\configure_invokeai.py
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "The preload_models.py script crashed or was cancelled."
echo "The configure script crashed or was cancelled."
echo "InvokeAI is not ready to run. To run preload_models.py again,"
echo "run the command 'update.bat' in this directory."
echo "Press any key to continue"
@@ -114,5 +119,6 @@ echo "* InvokeAI installed successfully *"
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
echo "Press any key to continue"
pause
exit 0
exit /b

Some files were not shown because too many files have changed in this diff.