mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-15 13:08:12 -05:00
Compare commits
196 Commits
v2.3.3-rc1
...
invokeai-b
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dbd2161601 | ||
|
|
1f83ac2eae | ||
|
|
f7bb68d01c | ||
|
|
8cddf9c5b3 | ||
|
|
9b546ccf06 | ||
|
|
73dbf73a95 | ||
|
|
18a1f3893f | ||
|
|
018d5dab53 | ||
|
|
96a5de30e3 | ||
|
|
4d62d5b802 | ||
|
|
17de5c7008 | ||
|
|
f95403dcda | ||
|
|
e54d060d17 | ||
|
|
a01f1d4940 | ||
|
|
1873817ac9 | ||
|
|
31333a736c | ||
|
|
03274b6da6 | ||
|
|
0646649c05 | ||
|
|
2af511c98a | ||
|
|
f0039cc70a | ||
|
|
8fa7d5ca64 | ||
|
|
d90aa42799 | ||
|
|
c5b34d21e5 | ||
|
|
40a4867143 | ||
|
|
4b25f80427 | ||
|
|
894e2e643d | ||
|
|
a38ff1a16b | ||
|
|
41f268b475 | ||
|
|
b3ae3f595f | ||
|
|
29962613d8 | ||
|
|
1170cee1d8 | ||
|
|
5983e65b22 | ||
|
|
bc724fcdc3 | ||
|
|
1faf9c5cdd | ||
|
|
6d1f8e6997 | ||
|
|
b141ab42d3 | ||
|
|
0590bd6626 | ||
|
|
35c4ff8ab0 | ||
|
|
0784e49d92 | ||
|
|
09fe21116b | ||
|
|
b185931f84 | ||
|
|
1a4d229650 | ||
|
|
e9d2205976 | ||
|
|
4b624dccf0 | ||
|
|
3dffa33097 | ||
|
|
ab9756b8d2 | ||
|
|
4b74b51ffe | ||
|
|
0a020e1c06 | ||
|
|
baf60948ee | ||
|
|
4e4fa1b71d | ||
|
|
7bd870febb | ||
|
|
b62cce20b8 | ||
|
|
6a8848b61f | ||
|
|
c8fa01908c | ||
|
|
261be4e2e5 | ||
|
|
e0695234e7 | ||
|
|
cb1d433f30 | ||
|
|
e3772f674d | ||
|
|
ad5142d6f7 | ||
|
|
fc4b76c8b9 | ||
|
|
1e6d804104 | ||
|
|
793488e90a | ||
|
|
11cd8d026f | ||
|
|
25faec8d70 | ||
|
|
a14fc3ace5 | ||
|
|
667dee7b22 | ||
|
|
f75a20b218 | ||
|
|
8246e4abf2 | ||
|
|
afcb278e66 | ||
|
|
0a0e44b51e | ||
|
|
d4d3441a52 | ||
|
|
3a0fed2fda | ||
|
|
fad6fc807b | ||
|
|
63ecdb19fe | ||
|
|
d7b2dbba66 | ||
|
|
16aeb8d640 | ||
|
|
e0bd30b98c | ||
|
|
90f77c047c | ||
|
|
941fc2297f | ||
|
|
110b067c52 | ||
|
|
71e4addd10 | ||
|
|
67435da996 | ||
|
|
8518f8c2ac | ||
|
|
d3b63ca0fe | ||
|
|
605ceb2e95 | ||
|
|
b632b35079 | ||
|
|
c9372f919c | ||
|
|
acd9838559 | ||
|
|
fd74f51384 | ||
|
|
1e5a44a474 | ||
|
|
78ea5d773d | ||
|
|
7547784e98 | ||
|
|
e82641d5f9 | ||
|
|
beff122d90 | ||
|
|
dabf56bee8 | ||
|
|
4faf902ec4 | ||
|
|
2c5c20c8a0 | ||
|
|
a8b9458de2 | ||
|
|
274d6238fa | ||
|
|
10400761f0 | ||
|
|
b598b844e4 | ||
|
|
8554f81e57 | ||
|
|
74ff73ffc8 | ||
|
|
993baadc22 | ||
|
|
ccfb0b94b9 | ||
|
|
8fbe019273 | ||
|
|
352805d607 | ||
|
|
879c80022e | ||
|
|
ea5f6b9826 | ||
|
|
4145e27ce6 | ||
|
|
3d4f4b677f | ||
|
|
249173faf5 | ||
|
|
794ef868af | ||
|
|
a1ed22517f | ||
|
|
3765ee9b59 | ||
|
|
91e4c60876 | ||
|
|
46e578e1ef | ||
|
|
3a8ef0a00c | ||
|
|
2a586f3179 | ||
|
|
6ce24846eb | ||
|
|
c2487e4330 | ||
|
|
cf262dd2ea | ||
|
|
5a8d66ab02 | ||
|
|
b0b0c48d8a | ||
|
|
8404e06d77 | ||
|
|
a91d01c27a | ||
|
|
5eeca47887 | ||
|
|
66b361294b | ||
|
|
0fb1e79a0b | ||
|
|
14f1efaf4f | ||
|
|
23aa17e387 | ||
|
|
f23cc54e1b | ||
|
|
e3d992d5d7 | ||
|
|
bb972b2e3d | ||
|
|
41a8fdea53 | ||
|
|
a78ff86e42 | ||
|
|
8e2fd4c96a | ||
|
|
2f424f29a0 | ||
|
|
90f00db032 | ||
|
|
77a63e5310 | ||
|
|
8f921741a5 | ||
|
|
071df30597 | ||
|
|
589a817952 | ||
|
|
dcb21c0f46 | ||
|
|
9cf7e5f634 | ||
|
|
d9c46277ea | ||
|
|
c22d529528 | ||
|
|
cd98d88fe7 | ||
|
|
34e3aa1f88 | ||
|
|
49ffb64ef3 | ||
|
|
ef822902d4 | ||
|
|
ec14e2db35 | ||
|
|
5725fcb3e0 | ||
|
|
1447b6df96 | ||
|
|
e700da23d8 | ||
|
|
036ca31282 | ||
|
|
7dbe027b18 | ||
|
|
523e44ccfe | ||
|
|
6a7948466e | ||
|
|
4ce8b1ba21 | ||
|
|
68a3132d81 | ||
|
|
b69f9d4af1 | ||
|
|
6a1129ab64 | ||
|
|
8e1fd92e7f | ||
|
|
f64a4db5fa | ||
|
|
3f477da46c | ||
|
|
71972c3709 | ||
|
|
d4083221a6 | ||
|
|
5b4a241f5c | ||
|
|
cd333e414b | ||
|
|
af3543a8c7 | ||
|
|
686f6ef8d6 | ||
|
|
f70b7272f3 | ||
|
|
24d92979db | ||
|
|
c669336d6b | ||
|
|
5529309e73 | ||
|
|
49c0516602 | ||
|
|
c1c62f770f | ||
|
|
e2b6dfeeb9 | ||
|
|
8f527c2b2d | ||
|
|
3732af63e8 | ||
|
|
de89041779 | ||
|
|
488326dd95 | ||
|
|
c3edede73f | ||
|
|
6e730bd654 | ||
|
|
884a5543c7 | ||
|
|
ac972ebbe3 | ||
|
|
3c6c18b34c | ||
|
|
8f6e43d4a4 | ||
|
|
404000bf93 | ||
|
|
e744774171 | ||
|
|
096e1d3a5d | ||
|
|
82e4d5aed2 | ||
|
|
5a7145c485 | ||
|
|
afc8639c25 | ||
|
|
141be95c2c |
6
.coveragerc
Normal file
6
.coveragerc
Normal file
@@ -0,0 +1,6 @@
|
||||
[run]
|
||||
omit='.env/*'
|
||||
source='.'
|
||||
|
||||
[report]
|
||||
show_missing = true
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -68,6 +68,7 @@ htmlcov/
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
cov.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
|
||||
5
.pytest.ini
Normal file
5
.pytest.ini
Normal file
@@ -0,0 +1,5 @@
|
||||
[pytest]
|
||||
DJANGO_SETTINGS_MODULE = webtas.settings
|
||||
; python_files = tests.py test_*.py *_tests.py
|
||||
|
||||
addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml
|
||||
@@ -145,7 +145,7 @@ not supported.
|
||||
_For Linux with an AMD GPU:_
|
||||
|
||||
```sh
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||
```
|
||||
|
||||
_For Macintoshes, either Intel or M1/M2:_
|
||||
|
||||
Binary file not shown.
@@ -1,164 +0,0 @@
|
||||
@echo off
|
||||
|
||||
@rem This script will install git (if not found on the PATH variable)
|
||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
@rem For users who already have git, this step will be skipped.
|
||||
|
||||
@rem Next, it'll download the project's source code.
|
||||
@rem Then it will download a self-contained, standalone Python and unpack it.
|
||||
@rem Finally, it'll create the Python virtual environment and preload the models.
|
||||
|
||||
@rem This enables a user to install this project without manually installing git or Python
|
||||
|
||||
@rem change to the script's directory
|
||||
PUSHD "%~dp0"
|
||||
|
||||
set "no_cache_dir=--no-cache-dir"
|
||||
if "%1" == "use-cache" (
|
||||
set "no_cache_dir="
|
||||
)
|
||||
|
||||
echo ***** Installing InvokeAI.. *****
|
||||
@rem Config
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
@rem https://mamba.readthedocs.io/en/latest/installation.html
|
||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
||||
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
||||
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
|
||||
|
||||
set PACKAGES_TO_INSTALL=
|
||||
|
||||
call git --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||
|
||||
@rem Cleanup
|
||||
del /q .tmp1 .tmp2
|
||||
|
||||
@rem (if necessary) install git into a contained environment
|
||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||
@rem download micromamba
|
||||
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
|
||||
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
|
||||
|
||||
@rem test the mamba binary
|
||||
echo ***** Micromamba version: *****
|
||||
call micromamba.exe --version
|
||||
|
||||
@rem create the installer env
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
)
|
||||
|
||||
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
|
||||
|
||||
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
)
|
||||
|
||||
del /q micromamba.exe
|
||||
|
||||
@rem For 'git' only
|
||||
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
|
||||
|
||||
@rem Download/unpack/clean up InvokeAI release sourceball
|
||||
set err_msg=----- InvokeAI source download failed -----
|
||||
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
|
||||
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- InvokeAI source unpack failed -----
|
||||
tar -zxf InvokeAI.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
del /q InvokeAI.tgz
|
||||
|
||||
set err_msg=----- InvokeAI source copy failed -----
|
||||
cd InvokeAI-*
|
||||
xcopy . .. /e /h
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
cd ..
|
||||
|
||||
@rem cleanup
|
||||
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
|
||||
rd /s /q .dev_scripts .github docker-build tests
|
||||
del /q requirements.in requirements-mkdocs.txt shell.nix
|
||||
|
||||
echo ***** Unpacked InvokeAI source *****
|
||||
|
||||
@rem Download/unpack/clean up python-build-standalone
|
||||
set err_msg=----- Python download failed -----
|
||||
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- Python unpack failed -----
|
||||
tar -zxf python.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
del /q python.tgz
|
||||
|
||||
echo ***** Unpacked python-build-standalone *****
|
||||
|
||||
@rem create venv
|
||||
set err_msg=----- problem creating venv -----
|
||||
.\python\python -E -s -m venv .venv
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
call .venv\Scripts\activate.bat
|
||||
|
||||
echo ***** Created Python virtual environment *****
|
||||
|
||||
@rem Print venv's Python version
|
||||
set err_msg=----- problem calling venv's python -----
|
||||
echo We're running under
|
||||
.venv\Scripts\python --version
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- pip update failed -----
|
||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Updated pip and wheel *****
|
||||
|
||||
set err_msg=----- requirements file copy failed -----
|
||||
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- main pip install failed -----
|
||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Installed Python dependencies *****
|
||||
|
||||
set err_msg=----- InvokeAI setup failed -----
|
||||
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
copy binary_installer\invoke.bat.in .\invoke.bat
|
||||
echo ***** Installed invoke launcher script ******
|
||||
|
||||
@rem more cleanup
|
||||
rd /s /q binary_installer installer_files
|
||||
|
||||
@rem preload the models
|
||||
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
|
||||
set err_msg=----- model download clone failed -----
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
deactivate
|
||||
|
||||
echo ***** Finished downloading models *****
|
||||
|
||||
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
|
||||
pause
|
||||
exit
|
||||
|
||||
:err_exit
|
||||
echo %err_msg%
|
||||
pause
|
||||
exit
|
||||
@@ -1,235 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
function _err_exit {
|
||||
if test "$1" -ne 0
|
||||
then
|
||||
echo -e "Error code $1; Error caught was '$2'"
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# This script will install git (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git, this step will be skipped.
|
||||
|
||||
# Next, it'll download the project's source code.
|
||||
# Then it will download a self-contained, standalone Python and unpack it.
|
||||
# Finally, it'll create the Python virtual environment and preload the models.
|
||||
|
||||
# This enables a user to install this project without manually installing git or Python
|
||||
|
||||
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
|
||||
|
||||
export no_cache_dir="--no-cache-dir"
|
||||
if [ $# -ge 1 ]; then
|
||||
if [ "$1" = "use-cache" ]; then
|
||||
export no_cache_dir=""
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="darwin";;
|
||||
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) ;;
|
||||
arm64*) ;;
|
||||
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
MAMBA_OS_NAME=$OS_NAME
|
||||
MAMBA_ARCH=$OS_ARCH
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
MAMBA_OS_NAME="osx"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "linux" ]; then
|
||||
MAMBA_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "x86_64" ]; then
|
||||
MAMBA_ARCH="64"
|
||||
fi
|
||||
|
||||
PY_ARCH=$OS_ARCH
|
||||
if [ "$OS_ARCH" == "arm64" ]; then
|
||||
PY_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
# Compute device ('cd' segment of reqs files) detect goes here
|
||||
# This needs a ton of work
|
||||
# Suggestions:
|
||||
# - lspci
|
||||
# - check $PATH for nvidia-smi, gtt CUDA/GPU version from output
|
||||
# - Surely there's a similar utility for AMD?
|
||||
CD="cuda"
|
||||
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
|
||||
CD="mps"
|
||||
fi
|
||||
|
||||
# config
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
|
||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
||||
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
|
||||
elif [ "$OS_NAME" == "linux" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
|
||||
fi
|
||||
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
|
||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
|
||||
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
|
||||
|
||||
chmod u+x ./micromamba
|
||||
|
||||
# test the mamba binary
|
||||
echo -e "\n***** Micromamba version: *****\n"
|
||||
./micromamba --version
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
|
||||
|
||||
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
rm -f micromamba.exe
|
||||
|
||||
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
|
||||
|
||||
# Download/unpack/clean up InvokeAI release sourceball
|
||||
_err_msg="\n----- InvokeAI source download failed -----\n"
|
||||
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
|
||||
_err_exit $? _err_msg
|
||||
_err_msg="\n----- InvokeAI source unpack failed -----\n"
|
||||
tar -zxf InvokeAI.tgz
|
||||
_err_exit $? _err_msg
|
||||
|
||||
rm -f InvokeAI.tgz
|
||||
|
||||
_err_msg="\n----- InvokeAI source copy failed -----\n"
|
||||
cd InvokeAI-*
|
||||
cp -r . ..
|
||||
_err_exit $? _err_msg
|
||||
cd ..
|
||||
|
||||
# cleanup
|
||||
rm -rf InvokeAI-*/
|
||||
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
|
||||
|
||||
echo -e "\n***** Unpacked InvokeAI source *****\n"
|
||||
|
||||
# Download/unpack/clean up python-build-standalone
|
||||
_err_msg="\n----- Python download failed -----\n"
|
||||
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
|
||||
_err_exit $? _err_msg
|
||||
_err_msg="\n----- Python unpack failed -----\n"
|
||||
tar -zxf python.tgz
|
||||
_err_exit $? _err_msg
|
||||
|
||||
rm -f python.tgz
|
||||
|
||||
echo -e "\n***** Unpacked python-build-standalone *****\n"
|
||||
|
||||
# create venv
|
||||
_err_msg="\n----- problem creating venv -----\n"
|
||||
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
# patch sysconfig so that extensions can build properly
|
||||
# adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
|
||||
PYTHON_INSTALL_DIR="$(pwd)/python"
|
||||
SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
|
||||
TMPFILE="$(mktemp)"
|
||||
chmod +w "${SYSCONFIG}"
|
||||
cp "${SYSCONFIG}" "${TMPFILE}"
|
||||
sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
|
||||
rm -f "${TMPFILE}"
|
||||
fi
|
||||
|
||||
./python/bin/python3 -E -s -m venv .venv
|
||||
_err_exit $? _err_msg
|
||||
source .venv/bin/activate
|
||||
|
||||
echo -e "\n***** Created Python virtual environment *****\n"
|
||||
|
||||
# Print venv's Python version
|
||||
_err_msg="\n----- problem calling venv's python -----\n"
|
||||
echo -e "We're running under"
|
||||
.venv/bin/python3 --version
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- pip update failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Updated pip *****\n"
|
||||
|
||||
_err_msg="\n----- requirements file copy failed -----\n"
|
||||
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- main pip install failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Installed Python dependencies *****\n"
|
||||
|
||||
_err_msg="\n----- InvokeAI setup failed -----\n"
|
||||
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Installed InvokeAI *****\n"
|
||||
|
||||
cp binary_installer/invoke.sh.in ./invoke.sh
|
||||
chmod a+rx ./invoke.sh
|
||||
echo -e "\n***** Installed invoke launcher script ******\n"
|
||||
|
||||
# more cleanup
|
||||
rm -rf binary_installer/ installer_files/
|
||||
|
||||
# preload the models
|
||||
.venv/bin/python3 scripts/configure_invokeai.py
|
||||
_err_msg="\n----- model download clone failed -----\n"
|
||||
_err_exit $? _err_msg
|
||||
deactivate
|
||||
|
||||
echo -e "\n***** Finished downloading models *****\n"
|
||||
|
||||
echo "All done! Run the command"
|
||||
echo " $scriptdir/invoke.sh"
|
||||
echo "to start InvokeAI."
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
@@ -1,36 +0,0 @@
|
||||
@echo off
|
||||
|
||||
PUSHD "%~dp0"
|
||||
call .venv\Scripts\activate.bat
|
||||
|
||||
echo Do you want to generate images using the
|
||||
echo 1. command-line
|
||||
echo 2. browser-based UI
|
||||
echo OR
|
||||
echo 3. open the developer console
|
||||
set /p choice="Please enter 1, 2 or 3: "
|
||||
if /i "%choice%" == "1" (
|
||||
echo Starting the InvokeAI command-line.
|
||||
.venv\Scripts\python scripts\invoke.py %*
|
||||
) else if /i "%choice%" == "2" (
|
||||
echo Starting the InvokeAI browser-based UI.
|
||||
.venv\Scripts\python scripts\invoke.py --web %*
|
||||
) else if /i "%choice%" == "3" (
|
||||
echo Developer Console
|
||||
echo Python command is:
|
||||
where python
|
||||
echo Python version is:
|
||||
python --version
|
||||
echo *************************
|
||||
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
|
||||
echo so that you can troubleshoot this InvokeAI installation as necessary.
|
||||
echo *************************
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) else (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
deactivate
|
||||
@@ -1,46 +0,0 @@
|
||||
#!/usr/bin/env sh
|
||||
|
||||
set -eu
|
||||
|
||||
. .venv/bin/activate
|
||||
|
||||
# set required env var for torch on mac MPS
|
||||
if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
|
||||
echo "Do you want to generate images using the"
|
||||
echo "1. command-line"
|
||||
echo "2. browser-based UI"
|
||||
echo "OR"
|
||||
echo "3. open the developer console"
|
||||
echo "Please enter 1, 2, or 3:"
|
||||
read choice
|
||||
|
||||
case $choice in
|
||||
1)
|
||||
printf "\nStarting the InvokeAI command-line..\n";
|
||||
.venv/bin/python scripts/invoke.py $*;
|
||||
;;
|
||||
2)
|
||||
printf "\nStarting the InvokeAI browser-based UI..\n";
|
||||
.venv/bin/python scripts/invoke.py --web $*;
|
||||
;;
|
||||
3)
|
||||
printf "\nDeveloper Console:\n";
|
||||
printf "Python command is:\n\t";
|
||||
which python;
|
||||
printf "Python version is:\n\t";
|
||||
python --version;
|
||||
echo "*************************"
|
||||
echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
|
||||
echo "so that you can troubleshoot this InvokeAI installation as necessary.";
|
||||
printf "*************************\n"
|
||||
echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
|
||||
/usr/bin/env "$SHELL";
|
||||
;;
|
||||
*)
|
||||
echo "Invalid selection";
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,17 +0,0 @@
|
||||
InvokeAI
|
||||
|
||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
Installation on Windows:
|
||||
NOTE: You might need to enable Windows Long Paths. If you're not sure,
|
||||
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
|
||||
file. Note that you will need to have admin privileges in order to
|
||||
do this.
|
||||
|
||||
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
|
||||
|
||||
Installation on Linux and Mac:
|
||||
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
|
||||
|
||||
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
|
||||
file (on Linux/Mac) to start InvokeAI.
|
||||
@@ -1,33 +0,0 @@
|
||||
--prefer-binary
|
||||
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
|
||||
--extra-index-url https://download.pytorch.org/whl/cu116
|
||||
--trusted-host https://download.pytorch.org
|
||||
accelerate~=0.15
|
||||
albumentations
|
||||
diffusers[torch]~=0.11
|
||||
einops
|
||||
eventlet
|
||||
flask_cors
|
||||
flask_socketio
|
||||
flaskwebgui==1.0.3
|
||||
getpass_asterisk
|
||||
imageio-ffmpeg
|
||||
pyreadline3
|
||||
realesrgan
|
||||
send2trash
|
||||
streamlit
|
||||
taming-transformers-rom1504
|
||||
test-tube
|
||||
torch-fidelity
|
||||
torch==1.12.1 ; platform_system == 'Darwin'
|
||||
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
||||
torchvision==0.13.1 ; platform_system == 'Darwin'
|
||||
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
||||
transformers
|
||||
picklescan
|
||||
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
|
||||
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
|
||||
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
|
||||
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
|
||||
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
|
||||
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
|
||||
93
docs/contributing/ARCHITECTURE.md
Normal file
93
docs/contributing/ARCHITECTURE.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Invoke.AI Architecture
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
|
||||
subgraph apps[Applications]
|
||||
webui[WebUI]
|
||||
cli[CLI]
|
||||
|
||||
subgraph webapi[Web API]
|
||||
api[HTTP API]
|
||||
sio[Socket.IO]
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
subgraph invoke[Invoke]
|
||||
direction LR
|
||||
invoker
|
||||
services
|
||||
sessions
|
||||
invocations
|
||||
end
|
||||
|
||||
subgraph core[AI Core]
|
||||
Generate
|
||||
end
|
||||
|
||||
webui --> webapi
|
||||
webapi --> invoke
|
||||
cli --> invoke
|
||||
|
||||
invoker --> services & sessions
|
||||
invocations --> services
|
||||
sessions --> invocations
|
||||
|
||||
services --> core
|
||||
|
||||
%% Styles
|
||||
classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
|
||||
classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A
|
||||
|
||||
class apps,webapi,invoke,core sg
|
||||
|
||||
```
|
||||
|
||||
## Applications
|
||||
|
||||
Applications are built on top of the invoke framework. They should construct `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.
|
||||
|
||||
### Web UI
|
||||
|
||||
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
|
||||
|
||||
| Component | Description |
|
||||
| --- | --- |
|
||||
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
|
||||
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
|
||||
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
|
||||
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
|
||||
| routers | API definitions for different areas of API functionality |
|
||||
|
||||
### CLI
|
||||
|
||||
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
|
||||
|
||||
## Invoke
|
||||
|
||||
The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.
|
||||
|
||||
### Invoker
|
||||
|
||||
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
|
||||
- **invocation services**, which are used by invocations to interact with core functionality.
|
||||
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
|
||||
|
||||
### Sessions
|
||||
|
||||
Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add and entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations can not be deleted or modified once added.
|
||||
|
||||
The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.
|
||||
|
||||
### Invocations
|
||||
|
||||
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
|
||||
|
||||
### Services
|
||||
|
||||
Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
|
||||
|
||||
## AI Core
|
||||
|
||||
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
|
||||
105
docs/contributing/INVOCATIONS.md
Normal file
105
docs/contributing/INVOCATIONS.md
Normal file
@@ -0,0 +1,105 @@
|
||||
# Invocations
|
||||
|
||||
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
|
||||
|
||||
## Creating a new invocation
|
||||
|
||||
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
|
||||
|
||||
An invocation looks like this:
|
||||
|
||||
```py
|
||||
class UpscaleInvocation(BaseInvocation):
|
||||
"""Upscales an image."""
|
||||
type: Literal['upscale'] = 'upscale'
|
||||
|
||||
# Inputs
|
||||
image: Union[ImageField,None] = Field(description="The input image")
|
||||
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||
level: Literal[2,4] = Field(default=2, description = "The upscale level")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
||||
results = context.services.generate.upscale_and_reconstruct(
|
||||
image_list = [[image, 0]],
|
||||
upscale = (self.level, self.strength),
|
||||
strength = 0.0, # GFPGAN strength
|
||||
save_original = False,
|
||||
image_callback = None,
|
||||
)
|
||||
|
||||
# Results are image and seed, unwrap for now
|
||||
# TODO: can this return multiple results?
|
||||
image_type = ImageType.RESULT
|
||||
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
||||
context.services.images.save(image_type, image_name, results[0][0])
|
||||
return ImageOutput(
|
||||
image = ImageField(image_type = image_type, image_name = image_name)
|
||||
)
|
||||
```
|
||||
|
||||
Each portion is important to implement correctly.
|
||||
|
||||
### Class definition and type
|
||||
```py
|
||||
class UpscaleInvocation(BaseInvocation):
|
||||
"""Upscales an image."""
|
||||
type: Literal['upscale'] = 'upscale'
|
||||
```
|
||||
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
|
||||
|
||||
### Inputs
|
||||
```py
|
||||
# Inputs
|
||||
image: Union[ImageField,None] = Field(description="The input image")
|
||||
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||
level: Literal[2,4] = Field(default=2, description="The upscale level")
|
||||
```
|
||||
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:
|
||||
| Part | Value | Description |
|
||||
| ---- | ----- | ----------- |
|
||||
| Name | `strength` | This field is referred to as `strength` |
|
||||
| Type Hint | `float` | This field must be of type `float` |
|
||||
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
|
||||
|
||||
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.
|
||||
|
||||
The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).
|
||||
|
||||
Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation by name and matching.
|
||||
|
||||
### Invoke Function
|
||||
```py
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
||||
results = context.services.generate.upscale_and_reconstruct(
|
||||
image_list = [[image, 0]],
|
||||
upscale = (self.level, self.strength),
|
||||
strength = 0.0, # GFPGAN strength
|
||||
save_original = False,
|
||||
image_callback = None,
|
||||
)
|
||||
|
||||
# Results are image and seed, unwrap for now
|
||||
image_type = ImageType.RESULT
|
||||
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
||||
context.services.images.save(image_type, image_name, results[0][0])
|
||||
return ImageOutput(
|
||||
image = ImageField(image_type = image_type, image_name = image_name)
|
||||
)
|
||||
```
|
||||
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.
|
||||
|
||||
Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).
|
||||
|
||||
Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
|
||||
|
||||
### Outputs
|
||||
```py
|
||||
class ImageOutput(BaseInvocationOutput):
|
||||
"""Base class for invocations that output an image"""
|
||||
type: Literal['image'] = 'image'
|
||||
|
||||
image: ImageField = Field(default=None, description="The output image")
|
||||
```
|
||||
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Concepts Library
|
||||
title: Styles and Subjects
|
||||
---
|
||||
|
||||
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
|
||||
@@ -25,10 +25,14 @@ library which downloads and merges TI files automatically upon request. You can
|
||||
also install your own or others' TI files by placing them in a designated
|
||||
directory.
|
||||
|
||||
You may also be interested in using [LoRA Models](LORAS.md) to
|
||||
generate images with specialized styles and subjects.
|
||||
|
||||
### An Example
|
||||
|
||||
Here are a few examples to illustrate how it works. All these images were
|
||||
generated using the command-line client and the Stable Diffusion 1.5 model:
|
||||
Here are a few examples to illustrate how Textual Inversion works. All
|
||||
these images were generated using the command-line client and the
|
||||
Stable Diffusion 1.5 model:
|
||||
|
||||
| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
|
||||
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|
||||
@@ -109,21 +113,50 @@ For example, TI files generated by the Hugging Face toolkit share the named
|
||||
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
|
||||
|
||||
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
|
||||
files it finds there. At startup you will see a message similar to this one:
|
||||
files it finds there. At startup you will see messages similar to these:
|
||||
|
||||
```bash
|
||||
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
|
||||
>> Loading embeddings from /data/lstein/invokeai-2.3/embeddings
|
||||
| Loading v1 embedding file: style-hamunaptra
|
||||
| Loading v4 embedding file: embeddings/learned_embeds-steps-500.bin
|
||||
| Loading v2 embedding file: lfa
|
||||
| Loading v3 embedding file: easynegative
|
||||
| Loading v1 embedding file: rem_rezero
|
||||
| Loading v2 embedding file: midj-strong
|
||||
| Loading v4 embedding file: anime-background-style-v2/learned_embeds.bin
|
||||
| Loading v4 embedding file: kamon-style/learned_embeds.bin
|
||||
** Notice: kamon-style/learned_embeds.bin was trained on a model with an incompatible token dimension: 768 vs 1024.
|
||||
>> Textual inversion triggers: <anime-background-style-v2>, <easynegative>, <lfa>, <midj-strong>, <milo>, Rem3-2600, Style-Hamunaptra
|
||||
```
|
||||
|
||||
Note the `*` trigger term. This is a placeholder term that many early TI
|
||||
tutorials taught people to use rather than a more descriptive term.
|
||||
Unfortunately, if you have multiple TI files that all use this term, only the
|
||||
first one loaded will be triggered by use of the term.
|
||||
Textual Inversion embeddings trained on version 1.X stable diffusion
|
||||
models are incompatible with version 2.X models and vice-versa.
|
||||
|
||||
To avoid this problem, you can use the `merge_embeddings.py` script to merge two
|
||||
or more TI files together. If it encounters a collision of terms, the script
|
||||
will prompt you to select new terms that do not collide. See
|
||||
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
|
||||
After the embeddings load, InvokeAI will print out a list of all the
|
||||
recognized trigger terms. To trigger the term, include it in the
|
||||
prompt exactly as written, including angle brackets if any and
|
||||
respecting the capitalization.
|
||||
|
||||
There are at least four different embedding file formats, and each uses
|
||||
a different convention for the trigger terms. In some cases, the
|
||||
trigger term is specified in the file contents and may or may not be
|
||||
surrounded by angle brackets. In the example above, `Rem3-2600`,
|
||||
`Style-Hamunaptra`, and `<midj-strong>` were specified this way and
|
||||
there is no easy way to change the term.
|
||||
|
||||
In other cases the trigger term is not contained within the embedding
|
||||
file. In this case, InvokeAI constructs a trigger term consisting of
|
||||
the base name of the file (without the file extension) surrounded by
|
||||
angle brackets. In the example above `<easynegative`> is such a file
|
||||
(the filename was `easynegative.safetensors`). In such cases, you can
|
||||
change the trigger term simply by renaming the file.
|
||||
|
||||
## Training your own Textual Inversion models
|
||||
|
||||
InvokeAI provides a script that lets you train your own Textual
|
||||
Inversion embeddings using a small number (about a half-dozen) images
|
||||
of your desired style or subject. Please see [Textual
|
||||
Inversion](TEXTUAL_INVERSION.md) for details.
|
||||
|
||||
## Further Reading
|
||||
|
||||
|
||||
100
docs/features/LORAS.md
Normal file
100
docs/features/LORAS.md
Normal file
@@ -0,0 +1,100 @@
|
||||
---
|
||||
title: Low-Rank Adaptation (LoRA) Models
|
||||
---
|
||||
|
||||
# :material-library-shelves: Using Low-Rank Adaptation (LoRA) Models
|
||||
|
||||
## Introduction
|
||||
|
||||
LoRA is a technique for fine-tuning Stable Diffusion models using much
|
||||
less time and memory than traditional training techniques. The
|
||||
resulting model files are much smaller than full model files, and can
|
||||
be used to generate specialized styles and subjects.
|
||||
|
||||
LoRAs are built on top of Stable Diffusion v1.x or 2.x checkpoint or
|
||||
diffusers models. To load a LoRA, you include its name in the text
|
||||
prompt using a simple syntax described below. While you will generally
|
||||
get the best results when you use the same model the LoRA was trained
|
||||
on, they will work to a greater or lesser extent with other models.
|
||||
The major caveat is that a LoRA built on top of a SD v1.x model cannot
|
||||
be used with a v2.x model, and vice-versa. If you try, you will get an
|
||||
error! You may refer to multiple LoRAs in your prompt.
|
||||
|
||||
When you apply a LoRA in a prompt you can specify a weight. The higher
|
||||
the weight, the more influence it will have on the image. Useful
|
||||
ranges for weights are usually in the 0.0 to 1.0 range (with ranges
|
||||
between 0.5 and 1.0 being most typical). However you can specify a
|
||||
higher weight if you wish. Like models, each LoRA has a slightly
|
||||
different useful weight range and will interact with other generation
|
||||
parameters such as the CFG, step count and sampler. The author of the
|
||||
LoRA will often provide guidance on the best settings, but feel free
|
||||
to experiment. Be aware that it often helps to reduce the CFG value
|
||||
when using LoRAs.
|
||||
|
||||
## Installing LoRAs
|
||||
|
||||
This is very easy! Download a LoRA model file from your favorite site
|
||||
(e.g. [CIVITAI](https://civitai.com) and place it in the `loras`
|
||||
folder in the InvokeAI root directory (usually `~invokeai/loras` on
|
||||
Linux/Macintosh machines, and `C:\Users\your-name\invokeai/loras` on
|
||||
Windows systems). If the `loras` folder does not already exist, just
|
||||
create it. The vast majority of LoRA models use the Kohya file format,
|
||||
which is a type of `.safetensors` file.
|
||||
|
||||
You may change where InvokeAI looks for the `loras` folder by passing the
|
||||
`--lora_directory` option to the `invoke.sh`/`invoke.bat` launcher, or
|
||||
by placing the option in `invokeai.init`. For example:
|
||||
|
||||
```
|
||||
invoke.sh --lora_directory=C:\Users\your-name\SDModels\lora
|
||||
```
|
||||
|
||||
## Using a LoRA in your prompt
|
||||
|
||||
To activate a LoRA use the syntax `withLora(my-lora-name,weight)`
|
||||
somewhere in the text of the prompt. The position doesn't matter; use
|
||||
whatever is most comfortable for you.
|
||||
|
||||
For example, if you have a LoRA named `parchment_people.safetensors`
|
||||
in your `loras` directory, you can load it with a weight of 0.9 with a
|
||||
prompt like this one:
|
||||
|
||||
```
|
||||
family sitting at dinner table withLora(parchment_people,0.9)
|
||||
```
|
||||
|
||||
Add additional `withLora()` phrases to load more LoRAs.
|
||||
|
||||
You may omit the weight entirely to default to a weight of 1.0:
|
||||
|
||||
```
|
||||
family sitting at dinner table withLora(parchment_people)
|
||||
```
|
||||
|
||||
If you watch the console as your prompt executes, you will see
|
||||
messages relating to the loading and execution of the LoRA. If things
|
||||
don't work as expected, note down the console messages and report them
|
||||
on the InvokeAI Issues pages or Discord channel.
|
||||
|
||||
That's pretty much all you need to know!
|
||||
|
||||
## Training Kohya Models
|
||||
|
||||
InvokeAI cannot currently train LoRA models, but it can load and use
|
||||
existing LoRA ones to generate images. While there are several LoRA
|
||||
model file formats, the predominant one is ["Kohya"
|
||||
format](https://github.com/kohya-ss/sd-scripts), written by [Kohya
|
||||
S.](https://github.com/kohya-ss). InvokeAI provides support for this
|
||||
format. For creating your own Kohya models, we recommend the Windows
|
||||
GUI written by former InvokeAI-team member
|
||||
[bmaltais](https://github.com/bmaltais), which can be found at
|
||||
[kohya_ss](https://github.com/bmaltais/kohya_ss).
|
||||
|
||||
We can also recommend the [HuggingFace DreamBooth Training
|
||||
UI](https://huggingface.co/spaces/lora-library/LoRA-DreamBooth-Training-UI),
|
||||
a paid service that supports both Textual Inversion and LoRA training.
|
||||
|
||||
You may also be interested in [Textual
|
||||
Inversion](TEXTUAL_INVERSION.md) training, which is supported by
|
||||
InvokeAI as a text console and command-line tool.
|
||||
|
||||
@@ -20,6 +20,8 @@ title: Overview
|
||||
|
||||
Scriptable access to InvokeAI's features.
|
||||
|
||||
- [Visual Manual for InvokeAI](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
|
||||
|
||||
- Image Generation
|
||||
|
||||
- [Prompt Engineering](PROMPTS.md)
|
||||
|
||||
155
docs/index.md
155
docs/index.md
@@ -142,6 +142,10 @@ This method is recommended for those familiar with running Docker containers
|
||||
- [WebUI overview](features/WEB.md)
|
||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||
- [Visual Manual for InvokeAI v2.3.1](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
|
||||
|
||||
<!-- separator -->
|
||||
|
||||
<!-- separator -->
|
||||
|
||||
### The InvokeAI Command Line Interface
|
||||
@@ -155,6 +159,7 @@ This method is recommended for those familiar with running Docker containers
|
||||
- [Inpainting](features/INPAINTING.md)
|
||||
- [Outpainting](features/OUTPAINTING.md)
|
||||
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
||||
- [Using LoRA models](features/LORAS.md)
|
||||
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
||||
- [Other Features](features/OTHER.md)
|
||||
@@ -165,7 +170,7 @@ This method is recommended for those familiar with running Docker containers
|
||||
|
||||
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||
- [Model Merging](features/MODEL_MERGING.md)
|
||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||
- [Adding custom styles and subjects via embeddings](features/CONCEPTS.md)
|
||||
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||
<!-- seperator -->
|
||||
@@ -177,6 +182,154 @@ This method is recommended for those familiar with running Docker containers
|
||||
|
||||
## :octicons-log-16: Latest Changes
|
||||
|
||||
### v2.3.3 <small>(29 March 2023)</small>
|
||||
|
||||
#### Bug Fixes
|
||||
1. When using legacy checkpoints with an external VAE, the VAE file is now scanned for malware prior to loading. Previously only the main model weights file was scanned.
|
||||
2. Textual inversion will select an appropriate batchsize based on whether `xformers` is active, and will default to `xformers` enabled if the library is detected.
|
||||
3. The batch script log file names have been fixed to be compatible with Windows.
|
||||
4. Occasional corruption of the `.next_prefix` file (which stores the next output file name in sequence) on Windows systems is now detected and corrected.
|
||||
5. An infinite loop when opening the developer's console from within the `invoke.sh` script has been corrected.
|
||||
|
||||
#### Enhancements
|
||||
1. It is now possible to load and run several community-contributed SD-2.0 based models, including the infamous "Illuminati" model.
|
||||
2. The "NegativePrompts" embedding file, and others like it, can now be loaded by placing it in the InvokeAI `embeddings` directory.
|
||||
3. If no `--model` is specified at launch time, InvokeAI will remember the last model used and restore it the next time it is launched.
|
||||
4. On Linux systems, the `invoke.sh` launcher now uses a prettier console-based interface. To take advantage of it, install the `dialog` package using your package manager (e.g. `sudo apt install dialog`).
|
||||
5. When loading legacy models (safetensors/ckpt) you can specify a custom config file and/or a VAE by placing like-named files in the same directory as the model following this example:
|
||||
```
|
||||
my-favorite-model.ckpt
|
||||
my-favorite-model.yaml
|
||||
my-favorite-model.vae.pt # or my-favorite-model.vae.safetensors
|
||||
```
|
||||
|
||||
### v2.3.2 <small>(13 March 2023)</small>
|
||||
|
||||
#### Bugfixes
|
||||
|
||||
Since version 2.3.1 the following bugs have been fixed:
|
||||
|
||||
1. Black images appearing for potential NSFW images when generating with legacy checkpoint models and both `--no-nsfw_checker` and `--ckpt_convert` turned on.
|
||||
2. Black images appearing when generating from models fine-tuned on Stable-Diffusion-2-1-base. When importing V2-derived models, you may be asked to select whether the model was derived from a "base" model (512 pixels) or the 768-pixel SD-2.1 model.
|
||||
3. The "Use All" button was not restoring the Hi-Res Fix setting on the WebUI
|
||||
4. When using the model installer console app, models failed to import correctly when importing from directories with spaces in their names. A similar issue with the output directory was also fixed.
|
||||
5. Crashes that occurred during model merging.
|
||||
6. Restore previous naming of Stable Diffusion base and 768 models.
|
||||
7. Upgraded to latest versions of `diffusers`, `transformers`, `safetensors` and `accelerate` libraries upstream. We hope that this will fix the `assertion NDArray > 2**32` issue that MacOS users have had when generating images larger than 768x768 pixels. Please report back.
|
||||
|
||||
As part of the upgrade to `diffusers`, the location of the diffusers-based models has changed from `models/diffusers` to `models/hub`. When you launch InvokeAI for the first time, it will prompt you to OK a one-time move. This should be quick and harmless, but if you have modified your `models/diffusers` directory in some way, for example using symlinks, you may wish to cancel the migration and make appropriate adjustments.
|
||||
|
||||
#### New "Invokeai-batch" script
|
||||
|
||||
2.3.2 introduces a new command-line only script called
|
||||
`invokeai-batch` that can be used to generate hundreds of images from
|
||||
prompts and settings that vary systematically. This can be used to try
|
||||
the same prompt across multiple combinations of models, steps, CFG
|
||||
settings and so forth. It also allows you to template prompts and
|
||||
generate a combinatorial list like: ``` a shack in the mountains,
|
||||
photograph a shack in the mountains, watercolor a shack in the
|
||||
mountains, oil painting a chalet in the mountains, photograph a chalet
|
||||
in the mountains, watercolor a chalet in the mountains, oil painting a
|
||||
shack in the desert, photograph ... ```
|
||||
|
||||
If you have a system with multiple GPUs, or a single GPU with lots of
|
||||
VRAM, you can parallelize generation across the combinatorial set,
|
||||
reducing wait times and using your system's resources efficiently
|
||||
(make sure you have good GPU cooling).
|
||||
|
||||
To try `invokeai-batch` out. Launch the "developer's console" using
|
||||
the `invoke` launcher script, or activate the invokeai virtual
|
||||
environment manually. From the console, give the command
|
||||
`invokeai-batch --help` in order to learn how the script works and
|
||||
create your first template file for dynamic prompt generation.
|
||||
|
||||
### v2.3.1 <small>(26 February 2023)</small>
|
||||
|
||||
This is primarily a bugfix release, but it does provide several new features that will improve the user experience.
|
||||
|
||||
#### Enhanced support for model management
|
||||
|
||||
InvokeAI now makes it convenient to add, remove and modify models. You can individually import models that are stored on your local system, scan an entire folder and its subfolders for models and import them automatically, and even directly import models from the internet by providing their download URLs. You also have the option of designating a local folder to scan for new models each time InvokeAI is restarted.
|
||||
|
||||
There are three ways of accessing the model management features:
|
||||
|
||||
1. ***From the WebUI***, click on the cube to the right of the model selection menu. This will bring up a form that allows you to import models individually from your local disk or scan a directory for models to import.
|
||||
|
||||

|
||||
|
||||
2. **Using the Model Installer App**
|
||||
|
||||
Choose option (5) _download and install models_ from the `invoke` launcher script to start a new console-based application for model management. You can use this to select from a curated set of starter models, or import checkpoint, safetensors, and diffusers models from a local disk or the internet. The example below shows importing two checkpoint URLs from popular SD sites and a HuggingFace diffusers model using its Repository ID. It also shows how to designate a folder to be scanned at startup time for new models to import.
|
||||
|
||||
Command-line users can start this app using the command `invokeai-model-install`.
|
||||
|
||||

|
||||
|
||||
3. **Using the Command Line Client (CLI)**
|
||||
|
||||
The `!install_model` and `!convert_model` commands have been enhanced to allow entering of URLs and local directories to scan and import. The first command installs .ckpt and .safetensors files as-is. The second one converts them into the faster diffusers format before installation.
|
||||
|
||||
Internally InvokeAI is able to probe the contents of a .ckpt or .safetensors file to distinguish among v1.x, v2.x and inpainting models. This means that you do **not** need to include "inpaint" in your model names to use an inpainting model. Note that Stable Diffusion v2.x models will be autoconverted into a diffusers model the first time you use it.
|
||||
|
||||
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management.
|
||||
|
||||
#### An Improved Installer Experience

The installer now launches a console-based UI for setting and changing commonly-used startup options:



After selecting the desired options, the installer installs several support models needed by InvokeAI's face reconstruction and upscaling features and then launches the interface for selecting and installing models shown earlier. At any time, you can edit the startup options by launching `invoke.sh`/`invoke.bat` and entering option (6) _change InvokeAI startup options_.

Command-line users can launch the new configure app using `invokeai-configure`.

This release also comes with a renewed updater. To do an update without going through a whole reinstallation, launch `invoke.sh` or `invoke.bat` and choose option (9) _update InvokeAI_. This will bring you to a screen that prompts you to update to the latest released version, to the most current development version, or to any released or unreleased version you choose by selecting its tag or branch.


Command-line users can run the updater by typing `invokeai-update`.
#### Image Symmetry Options

You can now generate images with horizontal and vertical symmetry. These options work by waiting until a selected step in the generation process and then turning on a mirror-image effect. In addition to generating some cool images, you can also use this to make side-by-side comparisons of how an image looks with more or fewer steps. Access this option from the WebUI by selecting _Symmetry_ from the image generation settings, or within the CLI by using the options `--h_symmetry_time_pct` and `--v_symmetry_time_pct` (these can be abbreviated to `--h_sym` and `--v_sym`, like all other options).


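From the CLI, an invocation might look something like this (the prompt is a placeholder, and the value is assumed to be the fraction of total steps after which mirroring is switched on):

```bash
invoke> "a symmetrical stained glass window" -s 50 --h_symmetry_time_pct 0.3
```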
#### A New Unified Canvas Look

This release introduces a beta version of the WebUI Unified Canvas. To try it out, open up the settings dialogue in the WebUI (gear icon) and select _Use Canvas Beta Layout_:



Refresh the screen and go to the Unified Canvas (left side of the screen, third icon from the top). The new layout is designed to provide more space to work in and to keep the image controls close to the image itself:


#### Model conversion and merging within the WebUI

The WebUI now has an intuitive interface for model merging, as well as for permanent conversion of models from legacy .ckpt/.safetensors formats into diffusers format. These options are also available directly from the `invoke.sh`/`invoke.bat` scripts.

#### An easier way to contribute translations to the WebUI

We have migrated our translation efforts to [Weblate](https://hosted.weblate.org/engage/invokeai/), a FOSS translation product. Maintaining the growing project's translations is now far simpler for the maintainers and community. Please review our brief [translation guide](https://github.com/invoke-ai/InvokeAI/blob/v2.3.1/docs/other/TRANSLATION.md) for more information on how to contribute.

#### Numerous internal bugfixes and performance issues

This release quashes multiple bugs that were reported in 2.3.0. Major internal changes include upgrading to `diffusers 0.13.0` and using the `compel` library for prompt parsing. See [Detailed Change Log](#full-change-log) for a detailed list of bugs caught and squished.
#### Summary of InvokeAI command line scripts (all accessible via the launcher menu)

| Command                   | Description                                                          |
|---------------------------|----------------------------------------------------------------------|
| `invokeai`                | Command line interface                                               |
| `invokeai --web`          | Web interface                                                        |
| `invokeai-model-install`  | Model installer with console forms-based front end                   |
| `invokeai-ti --gui`       | Textual inversion, with a console forms-based front end              |
| `invokeai-merge --gui`    | Model merging, with a console forms-based front end                  |
| `invokeai-configure`      | Startup configuration; can also be used to reinstall support models  |
| `invokeai-update`         | InvokeAI software updater                                            |

### v2.3.0 <small>(9 February 2023)</small>

#### Migration to Stable Diffusion `diffusers` models

@@ -417,7 +417,7 @@ Then type the following commands:

=== "AMD System"
    ```bash
-   pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
+   pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
    ```

### Corrupted configuration file

@@ -77,7 +77,7 @@ machine. To test, open up a terminal window and issue the following
command:

```
-rocm-smi
+rocminfo
```

If you get a table labeled "ROCm System Management Interface" the
@@ -95,9 +95,17 @@ recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.

-After installation, please run `rocm-smi` a second time to confirm
+After installation, please run `rocminfo` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
-do a reboot in order to load the driver.
+do a reboot in order to load the driver. In addition, if you see
+errors relating to your username not being a member of the `render`
+group, you may fix this by adding yourself to this group with the command:
+
+```
+sudo usermod -a -G render myUserName
+```
+
+(Thanks to @EgoringKosmos for the usermod recipe.)

### Linux Install with a ROCm-docker Container

@@ -110,7 +118,7 @@ recipes are available

When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
-https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
+https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer

@@ -11,7 +11,7 @@ The model checkpoint files ('\*.ckpt') are the Stable Diffusion
|
||||
captioned images gathered from multiple sources.
|
||||
|
||||
Originally there was only a single Stable Diffusion weights file,
|
||||
which many people named `model.ckpt`. Now there are dozens or more
|
||||
which many people named `model.ckpt`. Now there are hundreds
|
||||
that have been fine tuned to provide particular styles, genres, or
|
||||
other features. In addition, there are several new formats that
|
||||
improve on the original checkpoint format: a `.safetensors` format
|
||||
@@ -29,9 +29,10 @@ and performance are being made at a rapid pace. Among other features
|
||||
is the ability to download and install a `diffusers` model just by
|
||||
providing its HuggingFace repository ID.
|
||||
|
||||
While InvokeAI will continue to support `.ckpt` and `.safetensors`
|
||||
While InvokeAI will continue to support legacy `.ckpt` and `.safetensors`
|
||||
models for the near future, these are deprecated and support will
|
||||
likely be withdrawn at some point in the not-too-distant future.
|
||||
be withdrawn in version 3.0, after which all legacy models will be
|
||||
converted into diffusers at the time they are loaded.
|
||||
|
||||
This manual will guide you through installing and configuring model
|
||||
weight files and converting legacy `.ckpt` and `.safetensors` files
|
||||
@@ -89,15 +90,18 @@ aware that CIVITAI hosts many models that generate NSFW content.
|
||||
!!! note
|
||||
|
||||
InvokeAI 2.3.x does not support directly importing and
|
||||
running Stable Diffusion version 2 checkpoint models. You may instead
|
||||
convert them into `diffusers` models using the conversion methods
|
||||
described below.
|
||||
running Stable Diffusion version 2 checkpoint models. If you
|
||||
try to import them, they will be automatically
|
||||
converted into `diffusers` models on the fly. This adds about 20s
|
||||
to loading time. To avoid this overhead, you are encouraged to
|
||||
use one of the conversion methods described below to convert them
|
||||
permanently.
|
||||
|
||||
## Installation
|
||||
|
||||
There are multiple ways to install and manage models:
|
||||
|
||||
1. The `invokeai-configure` script which will download and install them for you.
|
||||
1. The `invokeai-model-install` script which will download and install them for you.
|
||||
|
||||
2. The command-line tool (CLI) has commands that allows you to import, configure and modify
|
||||
models files.
|
||||
@@ -105,14 +109,41 @@ There are multiple ways to install and manage models:
|
||||
3. The web interface (WebUI) has a GUI for importing and managing
|
||||
models.
|
||||
|
||||
### Installation via `invokeai-configure`
|
||||
### Installation via `invokeai-model-install`
|
||||
|
||||
From the `invoke` launcher, choose option (6) "re-run the configure
|
||||
script to download new models." This will launch the same script that
|
||||
prompted you to select models at install time. You can use this to add
|
||||
models that you skipped the first time around. It is all right to
|
||||
specify a model that was previously downloaded; the script will just
|
||||
confirm that the files are complete.
|
||||
From the `invoke` launcher, choose option (5) "Download and install
|
||||
models." This will launch the same script that prompted you to select
|
||||
models at install time. You can use this to add models that you
|
||||
skipped the first time around. It is all right to specify a model that
|
||||
was previously downloaded; the script will just confirm that the files
|
||||
are complete.
|
||||
|
||||
This script allows you to load 3rd party models. Look for a large text
|
||||
entry box labeled "IMPORT LOCAL AND REMOTE MODELS." In this box, you
|
||||
can cut and paste one or more of any of the following:
|
||||
|
||||
1. A URL that points to a downloadable .ckpt or .safetensors file.
|
||||
2. A file path pointing to a .ckpt or .safetensors file.
|
||||
3. A diffusers model repo_id (from HuggingFace) in the format
|
||||
"owner/repo_name".
|
||||
4. A directory path pointing to a diffusers model directory.
|
||||
5. A directory path pointing to a directory containing a bunch of
|
||||
.ckpt and .safetensors files. All will be imported.
|
||||
|
||||
You can enter multiple items into the textbox, each one on a separate
|
||||
line. You can paste into the textbox using ctrl-shift-V or by dragging
|
||||
and dropping a file/directory from the desktop into the box.
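For example, a single paste into that text box might contain entries like these (all illustrative; the repo_id follows the `owner/repo_name` format described above):

```
https://example.org/sd_models/martians.safetensors
/home/fred/Downloads/analog-style.ckpt
runwayml/stable-diffusion-v1-5
/home/fred/models/diffusers/my-finetune
```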
|
||||
|
||||
The script also lets you designate a directory that will be scanned
|
||||
for new model files each time InvokeAI starts up. These models will be
|
||||
added automatically.
|
||||
|
||||
Lastly, the script gives you a checkbox option to convert legacy models
|
||||
into diffusers, or to run the legacy model directly. If you choose to
|
||||
convert, the original .ckpt/.safetensors file will **not** be deleted,
|
||||
but a new diffusers directory will be created, using twice your disk
|
||||
space. However, the diffusers version will load faster, and will be
|
||||
compatible with InvokeAI 3.0.
|
||||
|
||||
### Installation via the CLI
|
||||
|
||||
@@ -144,19 +175,15 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors
|
||||
For this to work, the URL must not be password-protected. Otherwise
|
||||
you will receive a 404 error.
|
||||
|
||||
When you import a legacy model, the CLI will first ask you what type
|
||||
of model this is. You can indicate whether it is a model based on
|
||||
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
|
||||
or a 1.x inpainting model. Be careful to indicate the correct model
|
||||
type, or it will not load correctly. You can correct the model type
|
||||
after the fact using the `!edit_model` command.
|
||||
|
||||
The system will then ask you a few other questions about the model,
|
||||
including what size image it was trained on (usually 512x512), what
|
||||
name and description you wish to use for it, and whether you would
|
||||
like to install a custom VAE (variable autoencoder) file for the
|
||||
model. For recent models, the answer to the VAE question is usually
|
||||
"no," but it won't hurt to answer "yes".
|
||||
When you import a legacy model, the CLI will try to figure out what
|
||||
type of model it is and select the correct load configuration file.
|
||||
However, one thing it can't do is to distinguish between Stable
|
||||
Diffusion 2.x models trained on 512x512 vs 768x768 images. In this
|
||||
case, the CLI will pop up a menu of choices, asking you to select
|
||||
which type of model it is. Please consult the model documentation to
|
||||
identify the correct answer, as loading with the wrong configuration
|
||||
will lead to black images. You can correct the model type after the
|
||||
fact using the `!edit_model` command.
|
||||
|
||||
After importing, the model will load. If this is successful, you will
|
||||
be asked if you want to keep the model loaded in memory to start
|
||||
@@ -211,109 +238,6 @@ description for the model, whether to make this the default model that
|
||||
is loaded at InvokeAI startup time, and whether to replace its
|
||||
VAE. Generally the answer to the latter question is "no".
|
||||
|
||||
### Converting legacy models into `diffusers`
|
||||
|
||||
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
|
||||
model file into `diffusers` and install it. This will enable the model
|
||||
to load and run faster without loss of image quality.
|
||||
|
||||
The usage is identical to `!import_model`. You may point the command
|
||||
to either a downloaded model file on disk, or to a (non-password
|
||||
protected) URL:
|
||||
|
||||
```bash
|
||||
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
|
||||
```
|
||||
|
||||
After a successful conversion, the CLI will offer you the option of
|
||||
deleting the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
### Optimizing a previously-installed model
|
||||
|
||||
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
|
||||
file and wish to convert it into a `diffusers` model, you can do this
|
||||
without re-downloading and converting the original file using the
|
||||
`!optimize_model` command. Simply pass the short name of an existing
|
||||
installed model:
|
||||
|
||||
```bash
|
||||
invoke> !optimize_model martians-v1.0
|
||||
```
|
||||
|
||||
The model will be converted into `diffusers` format and replace the
|
||||
previously installed version. You will again be offered the
|
||||
opportunity to delete the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
### Related CLI Commands
|
||||
|
||||
There are a whole series of additional model management commands in
|
||||
the CLI that you can read about in [Command-Line
|
||||
Interface](../features/CLI.md). These include:
|
||||
|
||||
* `!models` - List all installed models
|
||||
* `!switch <model name>` - Switch to the indicated model
|
||||
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
|
||||
* `!del_model <model name>` - Delete the indicated model
|
||||
|
||||
### Manually editing `configs/models.yaml`
|
||||
|
||||
|
||||
If you are comfortable with a text editor then you may simply edit `models.yaml`
|
||||
directly.
|
||||
|
||||
You will need to download the desired `.ckpt/.safetensors` file and
|
||||
place it somewhere on your machine's filesystem. Alternatively, for a
|
||||
`diffusers` model, record the repo_id or download the whole model
|
||||
directory. Then using a **text** editor (e.g. the Windows Notepad
|
||||
application), open the file `configs/models.yaml`, and add a new
|
||||
stanza that follows this model:
|
||||
|
||||
#### A legacy model
|
||||
|
||||
A legacy `.ckpt` or `.safetensors` entry will look like this:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.0:
|
||||
description: A great fine-tune in Arabian Nights style
|
||||
weights: ./path/to/arabian-nights-1.0.ckpt
|
||||
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||
format: ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
default: false
|
||||
```
|
||||
|
||||
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
|
||||
|
||||
#### A diffusers model
|
||||
|
||||
A stanza for a `diffusers` model will look like this for a HuggingFace
|
||||
model with a repository ID:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
repo_id: captahab/arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
And for a downloaded directory:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
path: /path/to/captahab-arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
There is additional syntax for indicating an external VAE to use with
|
||||
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
|
||||
|
||||
After you save the modified `models.yaml` file relaunch
|
||||
`invokeai`. The new model will now be available for your use.
|
||||
|
||||
### Installation via the WebUI
|
||||
|
||||
To access the WebUI Model Manager, click on the button that looks like
|
||||
@@ -393,3 +317,143 @@ And here is what the same argument looks like in `invokeai.init`:
|
||||
--no-nsfw_checker
|
||||
--autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
```
|
||||
|
||||
### Specifying a configuration file for legacy checkpoints
|
||||
|
||||
Some checkpoint files come with instructions to use a specific .yaml
|
||||
configuration file. For InvokeAI load this file correctly, please put
|
||||
the config file in the same directory as the corresponding `.ckpt` or
|
||||
`.safetensors` file and make sure the file has the same basename as
|
||||
the model file. Here is an example:
|
||||
|
||||
```bash
|
||||
wonderful-model-v2.ckpt
|
||||
wonderful-model-v2.yaml
|
||||
```
|
||||
|
||||
This is not needed for `diffusers` models, which come with their own
|
||||
pre-packaged configuration.
|
||||
|
||||
### Specifying a custom VAE file for legacy checkpoints
|
||||
|
||||
To associate a custom VAE with a legacy file, place the VAE file in
|
||||
the same directory as the corresponding `.ckpt` or
|
||||
`.safetensors` file and make sure the file has the same basename as
|
||||
the model file. Use the suffix `.vae.pt` for VAE checkpoint files, and
|
||||
`.vae.safetensors` for VAE safetensors files. There is no requirement
|
||||
that both the model and the VAE follow the same format.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
wonderful-model-v2.pt
|
||||
wonderful-model-v2.vae.safetensors
|
||||
```
|
||||
|
||||
### Converting legacy models into `diffusers`
|
||||
|
||||
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
|
||||
model file into `diffusers` and install it. This will enable the model
|
||||
to load and run faster without loss of image quality.
|
||||
|
||||
The usage is identical to `!import_model`. You may point the command
|
||||
to either a downloaded model file on disk, or to a (non-password
|
||||
protected) URL:
|
||||
|
||||
```bash
|
||||
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
|
||||
```
|
||||
|
||||
After a successful conversion, the CLI will offer you the option of
|
||||
deleting the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
### Optimizing a previously-installed model
|
||||
|
||||
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
|
||||
file and wish to convert it into a `diffusers` model, you can do this
|
||||
without re-downloading and converting the original file using the
|
||||
`!optimize_model` command. Simply pass the short name of an existing
|
||||
installed model:
|
||||
|
||||
```bash
|
||||
invoke> !optimize_model martians-v1.0
|
||||
```
|
||||
|
||||
The model will be converted into `diffusers` format and replace the
|
||||
previously installed version. You will again be offered the
|
||||
opportunity to delete the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
Alternatively you can use the WebUI's model manager to handle diffusers
|
||||
optimization. Select the legacy model you wish to convert, and then
|
||||
look for a button labeled "Convert to Diffusers" in the upper right of
|
||||
the window.
|
||||
|
||||
### Related CLI Commands
|
||||
|
||||
There are a whole series of additional model management commands in
|
||||
the CLI that you can read about in [Command-Line
|
||||
Interface](../features/CLI.md). These include:
|
||||
|
||||
* `!models` - List all installed models
|
||||
* `!switch <model name>` - Switch to the indicated model
|
||||
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
|
||||
* `!del_model <model name>` - Delete the indicated model
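For instance, a short hypothetical session using two of these commands (the model name is taken from the examples above) might be:

```bash
invoke> !models
invoke> !switch arabian-nights-1.0
```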
|
||||
|
||||
### Manually editing `configs/models.yaml`
|
||||
|
||||
If you are comfortable with a text editor then you may simply edit `models.yaml`
|
||||
directly.
|
||||
|
||||
You will need to download the desired `.ckpt/.safetensors` file and
|
||||
place it somewhere on your machine's filesystem. Alternatively, for a
|
||||
`diffusers` model, record the repo_id or download the whole model
|
||||
directory. Then using a **text** editor (e.g. the Windows Notepad
|
||||
application), open the file `configs/models.yaml`, and add a new
|
||||
stanza that follows this model:
|
||||
|
||||
#### A legacy model
|
||||
|
||||
A legacy `.ckpt` or `.safetensors` entry will look like this:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.0:
|
||||
description: A great fine-tune in Arabian Nights style
|
||||
weights: ./path/to/arabian-nights-1.0.ckpt
|
||||
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||
format: ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
default: false
|
||||
```
|
||||
|
||||
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
|
||||
|
||||
#### A diffusers model
|
||||
|
||||
A stanza for a `diffusers` model will look like this for a HuggingFace
|
||||
model with a repository ID:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
repo_id: captahab/arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
And for a downloaded directory:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
path: /path/to/captahab-arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
There is additional syntax for indicating an external VAE to use with
|
||||
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
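As an illustrative sketch only (check `INITIAL_MODELS.yaml` for the exact key name; the paths are placeholders), a legacy stanza with an external VAE might look like:

```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./path/to/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  # assumed key for associating an external VAE checkpoint with this model
  vae: ./path/to/vae-ft-mse-840000-ema-pruned.ckpt
  format: ckpt
  width: 512
  height: 512
  default: false
```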
|
||||
|
||||
After you save the modified `models.yaml` file relaunch
|
||||
`invokeai`. The new model will now be available for your use.
|
||||
|
||||
|
||||
@@ -23,14 +23,16 @@ We thank them for all of their time and hard work.
|
||||
* @damian0815 - Attention Systems and Gameplay Engineer
|
||||
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
||||
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
||||
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
|
||||
* @keturn - Lead for Diffusers port
|
||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
|
||||
* @genomancer (Gregg Helt) - Model training and merging
|
||||
* @gogurtenjoyer - User support and testing
|
||||
* @whosawwhatsis - User support and testing
|
||||
|
||||
## **Contributions by**
|
||||
|
||||
- [tildebyte](https://github.com/tildebyte)
|
||||
- [Sean McLellan](https://github.com/Oceanswave)
|
||||
- [Kevin Gibbons](https://github.com/bakkot)
|
||||
- [Tesseract Cat](https://github.com/TesseractCat)
|
||||
@@ -78,6 +80,7 @@ We thank them for all of their time and hard work.
|
||||
- [psychedelicious](https://github.com/psychedelicious)
|
||||
- [damian0815](https://github.com/damian0815)
|
||||
- [Eugene Brodsky](https://github.com/ebr)
|
||||
- [Statcomm](https://github.com/statcomm)
|
||||
|
||||
## **Original CompVis Authors**
|
||||
|
||||
|
||||
@@ -144,8 +144,8 @@ class Installer:
|
||||
|
||||
from plumbum import FG, local
|
||||
|
||||
pip = local[get_pip_from_venv(venv_dir)]
|
||||
pip[ "install", "--upgrade", "pip"] & FG
|
||||
python = local[get_python_from_venv(venv_dir)]
|
||||
python[ "-m", "pip", "install", "--upgrade", "pip"] & FG
|
||||
|
||||
return venv_dir
|
||||
|
||||
@@ -241,14 +241,18 @@ class InvokeAiInstance:
|
||||
|
||||
from plumbum import FG, local
|
||||
|
||||
# Note that we're installing pinned versions of torch and
|
||||
# torchvision here, which *should* correspond to what is
|
||||
# in pyproject.toml. This is to prevent torch 2.0 from
|
||||
# being installed and immediately uninstalled and replaced with 1.13
|
||||
pip = local[self.pip]
|
||||
|
||||
(
|
||||
pip[
|
||||
"install",
|
||||
"--require-virtualenv",
|
||||
"torch",
|
||||
"torchvision",
|
||||
"torch~=1.13.1",
|
||||
"torchvision~=0.14.1",
|
||||
"--force-reinstall",
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
@@ -379,6 +383,9 @@ class InvokeAiInstance:
|
||||
shutil.copy(src, dest)
|
||||
os.chmod(dest, 0o0755)
|
||||
|
||||
if OS == "Linux":
|
||||
shutil.copy(Path(__file__).parents[1] / "templates" / "dialogrc", self.runtime / '.dialogrc')
|
||||
|
||||
def update(self):
|
||||
pass
|
||||
|
||||
@@ -405,6 +412,22 @@ def get_pip_from_venv(venv_path: Path) -> str:
|
||||
return str(venv_path.expanduser().resolve() / pip)
|
||||
|
||||
|
||||
def get_python_from_venv(venv_path: Path) -> str:
|
||||
"""
|
||||
Given a path to a virtual environment, get the absolute path to the `python` executable
|
||||
in a cross-platform fashion. Does not validate that the python executable
|
||||
actually exists in the virtualenv.
|
||||
|
||||
:param venv_path: Path to the virtual environment
|
||||
:type venv_path: Path
|
||||
:return: Absolute path to the python executable
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
python = "Scripts\python.exe" if OS == "Windows" else "bin/python"
|
||||
return str(venv_path.expanduser().resolve() / python)
|
||||
|
||||
|
||||
def set_sys_path(venv_path: Path) -> None:
|
||||
"""
|
||||
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
|
||||
|
||||
installer/templates/dialogrc (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
# Screen
|
||||
use_shadow = OFF
|
||||
use_colors = ON
|
||||
screen_color = (BLACK, BLACK, ON)
|
||||
|
||||
# Box
|
||||
dialog_color = (YELLOW, BLACK , ON)
|
||||
title_color = (YELLOW, BLACK, ON)
|
||||
border_color = (YELLOW, BLACK, OFF)
|
||||
border2_color = (YELLOW, BLACK, OFF)
|
||||
|
||||
# Button
|
||||
button_active_color = (RED, BLACK, OFF)
|
||||
button_inactive_color = (YELLOW, BLACK, OFF)
|
||||
button_label_active_color = (YELLOW,BLACK,ON)
|
||||
button_label_inactive_color = (YELLOW,BLACK,ON)
|
||||
|
||||
# Menu box
|
||||
menubox_color = (BLACK, BLACK, ON)
|
||||
menubox_border_color = (YELLOW, BLACK, OFF)
|
||||
menubox_border2_color = (YELLOW, BLACK, OFF)
|
||||
|
||||
# Menu window
|
||||
item_color = (YELLOW, BLACK, OFF)
|
||||
item_selected_color = (BLACK, YELLOW, OFF)
|
||||
tag_key_color = (YELLOW, BLACK, OFF)
|
||||
tag_key_selected_color = (BLACK, YELLOW, OFF)
|
||||
@@ -1,6 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
# coauthored by Lincoln Stein, Eugene Brodsky and JoshuaKimsey
|
||||
# MIT License
|
||||
|
||||
# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
|
||||
# Copyright 2023, The InvokeAI Development Team
|
||||
|
||||
####
|
||||
@@ -14,7 +16,7 @@
|
||||
|
||||
set -eu
|
||||
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
# Ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
@@ -23,133 +25,159 @@ cd "$scriptdir"
|
||||
export INVOKEAI_ROOT="$scriptdir"
|
||||
PARAMS=$@
|
||||
|
||||
# set required env var for torch on mac MPS
|
||||
# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
|
||||
tui=true
|
||||
if command -v dialog &>/dev/null; then
|
||||
# This must use $@ to properly loop through the arguments passed by the user
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" == "--no-tui" ]; then
|
||||
tui=false
|
||||
# Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
|
||||
PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
|
||||
break
|
||||
fi
|
||||
done
|
||||
else
|
||||
tui=false
|
||||
fi
|
||||
|
||||
# Set required env var for torch on mac MPS
|
||||
if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
|
||||
# Primary function for the case statement to determine user input
|
||||
do_choice() {
|
||||
case $1 in
|
||||
1)
|
||||
echo "Generate images with a browser-based interface"
|
||||
clear
|
||||
invokeai --web $PARAMS
|
||||
;;
|
||||
2)
|
||||
echo "Generate images using a command-line interface"
|
||||
clear
|
||||
invokeai $PARAMS
|
||||
;;
|
||||
3)
|
||||
echo "Textual inversion training"
|
||||
clear
|
||||
invokeai-ti --gui $PARAMS
|
||||
;;
|
||||
4)
|
||||
echo "Merge models (diffusers type only)"
|
||||
clear
|
||||
invokeai-merge --gui $PARAMS
|
||||
;;
|
||||
5)
|
||||
echo "Download and install models"
|
||||
clear
|
||||
invokeai-model-install --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
6)
|
||||
echo "Change InvokeAI startup options"
|
||||
clear
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
||||
;;
|
||||
7)
|
||||
echo "Re-run the configure script to fix a broken install"
|
||||
clear
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
|
||||
;;
|
||||
8)
|
||||
echo "Open the developer console"
|
||||
clear
|
||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
9)
|
||||
echo "Update InvokeAI"
|
||||
clear
|
||||
invokeai-update
|
||||
;;
|
||||
10)
|
||||
echo "Command-line help"
|
||||
clear
|
||||
invokeai --help
|
||||
;;
|
||||
*)
|
||||
echo "Exiting..."
|
||||
exit
|
||||
;;
|
||||
1)
|
||||
clear
|
||||
printf "Generate images with a browser-based interface\n"
|
||||
invokeai --web $PARAMS
|
||||
;;
|
||||
2)
|
||||
clear
|
||||
printf "Generate images using a command-line interface\n"
|
||||
invokeai $PARAMS
|
||||
;;
|
||||
3)
|
||||
clear
|
||||
printf "Textual inversion training\n"
|
||||
invokeai-ti --gui $PARAMS
|
||||
;;
|
||||
4)
|
||||
clear
|
||||
printf "Merge models (diffusers type only)\n"
|
||||
invokeai-merge --gui $PARAMS
|
||||
;;
|
||||
5)
|
||||
clear
|
||||
printf "Download and install models\n"
|
||||
invokeai-model-install --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
6)
|
||||
clear
|
||||
printf "Change InvokeAI startup options\n"
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
||||
;;
|
||||
7)
|
||||
clear
|
||||
printf "Re-run the configure script to fix a broken install\n"
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
|
||||
;;
|
||||
8)
|
||||
clear
|
||||
printf "Open the developer console\n"
|
||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
9)
|
||||
clear
|
||||
printf "Update InvokeAI\n"
|
||||
invokeai-update
|
||||
;;
|
||||
10)
|
||||
clear
|
||||
printf "Command-line help\n"
|
||||
invokeai --help
|
||||
;;
|
||||
"HELP 1")
|
||||
clear
|
||||
printf "Command-line help\n"
|
||||
invokeai --help
|
||||
;;
|
||||
*)
|
||||
clear
|
||||
printf "Exiting...\n"
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
clear
|
||||
}
|
||||
|
||||
|
||||
# Dialog-based TUI for launcing Invoke functions
|
||||
do_dialog() {
|
||||
while true
|
||||
do
|
||||
options=(
|
||||
1 "Generate images with a browser-based interface"
|
||||
2 "Generate images using a command-line interface"
|
||||
3 "Textual inversion training"
|
||||
4 "Merge models (diffusers type only)"
|
||||
5 "Download and install models"
|
||||
6 "Change InvokeAI startup options"
|
||||
7 "Re-run the configure script to fix a broken install"
|
||||
8 "Open the developer console"
|
||||
9 "Update InvokeAI"
|
||||
10 "Command-line help")
|
||||
|
||||
choice=$(dialog --clear \
|
||||
--backtitle "InvokeAI" \
|
||||
--title "What you like to run?" \
|
||||
--menu "Select an option:" \
|
||||
0 0 0 \
|
||||
"${options[@]}" \
|
||||
2>&1 >/dev/tty) || clear
|
||||
do_choice "$choice"
|
||||
done
|
||||
options=(
|
||||
1 "Generate images with a browser-based interface"
|
||||
2 "Generate images using a command-line interface"
|
||||
3 "Textual inversion training"
|
||||
4 "Merge models (diffusers type only)"
|
||||
5 "Download and install models"
|
||||
6 "Change InvokeAI startup options"
|
||||
7 "Re-run the configure script to fix a broken install"
|
||||
8 "Open the developer console"
|
||||
9 "Update InvokeAI")
|
||||
|
||||
choice=$(dialog --clear \
|
||||
--backtitle "\Zb\Zu\Z3InvokeAI" \
|
||||
--colors \
|
||||
--title "What would you like to run?" \
|
||||
--ok-label "Run" \
|
||||
--cancel-label "Exit" \
|
||||
--help-button \
|
||||
--help-label "CLI Help" \
|
||||
--menu "Select an option:" \
|
||||
0 0 0 \
|
||||
"${options[@]}" \
|
||||
2>&1 >/dev/tty) || clear
|
||||
do_choice "$choice"
|
||||
clear
|
||||
}
|
||||
|
||||
# Command-line interface for launching Invoke functions
|
||||
do_line_input() {
|
||||
echo " ** For a more attractive experience, please install the 'dialog' utility. **"
|
||||
echo ""
|
||||
while true
|
||||
do
|
||||
echo "Do you want to generate images using the"
|
||||
echo "1. browser-based UI"
|
||||
echo "2. command-line interface"
|
||||
echo "3. run textual inversion training"
|
||||
echo "4. merge models (diffusers type only)"
|
||||
echo "5. download and install models"
|
||||
echo "6. change InvokeAI startup options"
|
||||
echo "7. re-run the configure script to fix a broken install"
|
||||
echo "8. open the developer console"
|
||||
echo "9. update InvokeAI"
|
||||
echo "10. command-line help"
|
||||
echo "Q - Quit"
|
||||
echo ""
|
||||
read -p "Please enter 1-10, Q: [1] " yn
|
||||
choice=${yn:='1'}
|
||||
do_choice $choice
|
||||
done
|
||||
clear
|
||||
printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
|
||||
printf "Do you want to generate images using the\n"
|
||||
printf "1: Browser-based UI\n"
|
||||
printf "2: Command-line interface\n"
|
||||
printf "3: Run textual inversion training\n"
|
||||
printf "4: Merge models (diffusers type only)\n"
|
||||
printf "5: Download and install models\n"
|
||||
printf "6: Change InvokeAI startup options\n"
|
||||
printf "7: Re-run the configure script to fix a broken install\n"
|
||||
printf "8: Open the developer console\n"
|
||||
printf "9: Update InvokeAI\n"
|
||||
printf "10: Command-line help\n"
|
||||
printf "Q: Quit\n\n"
|
||||
read -p "Please enter 1-10, Q: [1] " yn
|
||||
choice=${yn:='1'}
|
||||
do_choice $choice
|
||||
clear
|
||||
}
|
||||
|
||||
# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
|
||||
if [ "$0" != "bash" ]; then
|
||||
# Dialog seems to be a standard installtion for most Linux distros, but this checks to ensure it is present regardless
|
||||
if command -v dialog &> /dev/null ; then
|
||||
do_dialog
|
||||
else
|
||||
do_line_input
|
||||
fi
|
||||
while true; do
|
||||
if $tui; then
|
||||
# .dialogrc must be located in the same directory as the invoke.sh script
|
||||
export DIALOGRC="./.dialogrc"
|
||||
do_dialog
|
||||
else
|
||||
do_line_input
|
||||
fi
|
||||
done
|
||||
else # in developer console
|
||||
python --version
|
||||
echo "Press ^D to exit"
|
||||
printf "Press ^D to exit\n"
|
||||
export PS1="(InvokeAI) \u@\h \w> "
|
||||
fi
|
||||
|
||||
|
||||
@@ -25,14 +25,23 @@ from invokeai.backend.modules.parameters import parameters_to_command
|
||||
import invokeai.frontend.dist as frontend
|
||||
from ldm.generate import Generate
|
||||
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
|
||||
from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, split_weighted_subprompts, \
|
||||
get_tokenizer
|
||||
from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
|
||||
from ldm.invoke.conditioning import (
|
||||
get_tokens_for_prompt_object,
|
||||
get_prompt_structure,
|
||||
split_weighted_subprompts,
|
||||
get_tokenizer,
|
||||
)
|
||||
from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
|
||||
from ldm.invoke.generator.inpaint import infill_methods
|
||||
from ldm.invoke.globals import Globals, global_converted_ckpts_dir
|
||||
from ldm.invoke.globals import (
|
||||
Globals,
|
||||
global_converted_ckpts_dir,
|
||||
global_models_dir,
|
||||
global_lora_models_dir,
|
||||
)
|
||||
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
|
||||
from compel.prompt_parser import Blend
|
||||
from ldm.invoke.globals import global_models_dir
|
||||
from ldm.invoke.merge_diffusers import merge_diffusion_models
|
||||
|
||||
# Loading Arguments
|
||||
@@ -193,8 +202,7 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(
|
||||
file_path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(file_path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
response = {
|
||||
@@ -224,7 +232,7 @@ class InvokeAIWebServer:
|
||||
server="flask_socketio",
|
||||
width=1600,
|
||||
height=1000,
|
||||
port=self.port
|
||||
port=self.port,
|
||||
).run()
|
||||
except KeyboardInterrupt:
|
||||
import sys
|
||||
@@ -265,16 +273,14 @@ class InvokeAIWebServer:
|
||||
# location for "finished" images
|
||||
self.result_path = args.outdir
|
||||
# temporary path for intermediates
|
||||
self.intermediate_path = os.path.join(
|
||||
self.result_path, "intermediates/")
|
||||
self.intermediate_path = os.path.join(self.result_path, "intermediates/")
|
||||
# path for user-uploaded init images and masks
|
||||
self.init_image_path = os.path.join(self.result_path, "init-images/")
|
||||
self.mask_image_path = os.path.join(self.result_path, "mask-images/")
|
||||
# path for temp images e.g. gallery generations which are not committed
|
||||
self.temp_image_path = os.path.join(self.result_path, "temp-images/")
|
||||
# path for thumbnail images
|
||||
self.thumbnail_image_path = os.path.join(
|
||||
self.result_path, "thumbnails/")
|
||||
self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
|
||||
# txt log
|
||||
self.log_path = os.path.join(self.result_path, "invoke_log.txt")
|
||||
# make all output paths
|
||||
@@ -299,21 +305,22 @@ class InvokeAIWebServer:
|
||||
config["infill_methods"] = infill_methods()
|
||||
socketio.emit("systemConfig", config)
|
||||
|
||||
@socketio.on('searchForModels')
|
||||
@socketio.on("searchForModels")
|
||||
def handle_search_models(search_folder: str):
|
||||
try:
|
||||
if not search_folder:
|
||||
socketio.emit(
|
||||
"foundModels",
|
||||
{'search_folder': None, 'found_models': None},
|
||||
{"search_folder": None, "found_models": None},
|
||||
)
|
||||
else:
|
||||
search_folder, found_models = self.generate.model_manager.search_models(
|
||||
search_folder)
|
||||
(
|
||||
search_folder,
|
||||
found_models,
|
||||
) = self.generate.model_manager.search_models(search_folder)
|
||||
socketio.emit(
|
||||
"foundModels",
|
||||
{'search_folder': search_folder,
|
||||
'found_models': found_models},
|
||||
{"search_folder": search_folder, "found_models": found_models},
|
||||
)
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
@@ -322,11 +329,11 @@ class InvokeAIWebServer:
|
||||
@socketio.on("addNewModel")
|
||||
def handle_add_model(new_model_config: dict):
|
||||
try:
|
||||
model_name = new_model_config['name']
|
||||
del new_model_config['name']
|
||||
model_name = new_model_config["name"]
|
||||
del new_model_config["name"]
|
||||
model_attributes = new_model_config
|
||||
if len(model_attributes['vae']) == 0:
|
||||
del model_attributes['vae']
|
||||
if len(model_attributes["vae"]) == 0:
|
||||
del model_attributes["vae"]
|
||||
update = False
|
||||
current_model_list = self.generate.model_manager.list_models()
|
||||
if model_name in current_model_list:
|
||||
@@ -335,14 +342,20 @@ class InvokeAIWebServer:
|
||||
print(f">> Adding New Model: {model_name}")
|
||||
|
||||
self.generate.model_manager.add_model(
|
||||
model_name=model_name, model_attributes=model_attributes, clobber=True)
|
||||
model_name=model_name,
|
||||
model_attributes=model_attributes,
|
||||
clobber=True,
|
||||
)
|
||||
self.generate.model_manager.commit(opt.conf)
|
||||
|
||||
new_model_list = self.generate.model_manager.list_models()
|
||||
socketio.emit(
|
||||
"newModelAdded",
|
||||
{"new_model_name": model_name,
|
||||
"model_list": new_model_list, 'update': update},
|
||||
{
|
||||
"new_model_name": model_name,
|
||||
"model_list": new_model_list,
|
||||
"update": update,
|
||||
},
|
||||
)
|
||||
print(f">> New Model Added: {model_name}")
|
||||
except Exception as e:
|
||||
@@ -357,8 +370,10 @@ class InvokeAIWebServer:
|
||||
updated_model_list = self.generate.model_manager.list_models()
|
||||
socketio.emit(
|
||||
"modelDeleted",
|
||||
{"deleted_model_name": model_name,
|
||||
"model_list": updated_model_list},
|
||||
{
|
||||
"deleted_model_name": model_name,
|
||||
"model_list": updated_model_list,
|
||||
},
|
||||
)
|
||||
print(f">> Model Deleted: {model_name}")
|
||||
except Exception as e:
|
||||
@@ -383,41 +398,48 @@ class InvokeAIWebServer:
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on('convertToDiffusers')
|
||||
@socketio.on("convertToDiffusers")
|
||||
def convert_to_diffusers(model_to_convert: dict):
|
||||
try:
|
||||
if (model_info := self.generate.model_manager.model_info(model_name=model_to_convert['model_name'])):
|
||||
if 'weights' in model_info:
|
||||
ckpt_path = Path(model_info['weights'])
|
||||
original_config_file = Path(model_info['config'])
|
||||
model_name = model_to_convert['model_name']
|
||||
model_description = model_info['description']
|
||||
if model_info := self.generate.model_manager.model_info(
|
||||
model_name=model_to_convert["model_name"]
|
||||
):
|
||||
if "weights" in model_info:
|
||||
ckpt_path = Path(model_info["weights"])
|
||||
original_config_file = Path(model_info["config"])
|
||||
model_name = model_to_convert["model_name"]
|
||||
model_description = model_info["description"]
|
||||
else:
|
||||
self.socketio.emit(
|
||||
"error", {"message": "Model is not a valid checkpoint file"})
|
||||
"error", {"message": "Model is not a valid checkpoint file"}
|
||||
)
|
||||
else:
|
||||
self.socketio.emit(
|
||||
"error", {"message": "Could not retrieve model info."})
|
||||
"error", {"message": "Could not retrieve model info."}
|
||||
)
|
||||
|
||||
if not ckpt_path.is_absolute():
|
||||
ckpt_path = Path(Globals.root, ckpt_path)
|
||||
|
||||
if original_config_file and not original_config_file.is_absolute():
|
||||
original_config_file = Path(
|
||||
Globals.root, original_config_file)
|
||||
original_config_file = Path(Globals.root, original_config_file)
|
||||
|
||||
diffusers_path = Path(
|
||||
ckpt_path.parent.absolute(),
|
||||
f'{model_name}_diffusers'
|
||||
ckpt_path.parent.absolute(), f"{model_name}_diffusers"
|
||||
)
|
||||
|
||||
if model_to_convert['save_location'] == 'root':
|
||||
if model_to_convert["save_location"] == "root":
|
||||
diffusers_path = Path(
|
||||
global_converted_ckpts_dir(), f'{model_name}_diffusers')
|
||||
global_converted_ckpts_dir(), f"{model_name}_diffusers"
|
||||
)
|
||||
|
||||
if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None:
|
||||
if (
|
||||
model_to_convert["save_location"] == "custom"
|
||||
and model_to_convert["custom_location"] is not None
|
||||
):
|
||||
diffusers_path = Path(
|
||||
model_to_convert['custom_location'], f'{model_name}_diffusers')
|
||||
model_to_convert["custom_location"], f"{model_name}_diffusers"
|
||||
)
|
||||
|
||||
if diffusers_path.exists():
|
||||
shutil.rmtree(diffusers_path)
|
||||
@@ -435,54 +457,99 @@ class InvokeAIWebServer:
|
||||
new_model_list = self.generate.model_manager.list_models()
|
||||
socketio.emit(
|
||||
"modelConverted",
|
||||
{"new_model_name": model_name,
|
||||
"model_list": new_model_list, 'update': True},
|
||||
{
|
||||
"new_model_name": model_name,
|
||||
"model_list": new_model_list,
|
||||
"update": True,
|
||||
},
|
||||
)
|
||||
print(f">> Model Converted: {model_name}")
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on('mergeDiffusersModels')
|
||||
@socketio.on("mergeDiffusersModels")
|
||||
def merge_diffusers_models(model_merge_info: dict):
|
||||
try:
|
||||
models_to_merge = model_merge_info['models_to_merge']
|
||||
models_to_merge = model_merge_info["models_to_merge"]
|
||||
model_ids_or_paths = [
|
||||
self.generate.model_manager.model_name_or_path(x) for x in models_to_merge]
|
||||
self.generate.model_manager.model_name_or_path(x)
|
||||
for x in models_to_merge
|
||||
]
|
||||
merged_pipe = merge_diffusion_models(
|
||||
model_ids_or_paths, model_merge_info['alpha'], model_merge_info['interp'], model_merge_info['force'])
|
||||
model_ids_or_paths,
|
||||
model_merge_info["alpha"],
|
||||
model_merge_info["interp"],
|
||||
model_merge_info["force"],
|
||||
)
|
||||
|
||||
dump_path = global_models_dir() / 'merged_models'
|
||||
if model_merge_info['model_merge_save_path'] is not None:
|
||||
dump_path = Path(model_merge_info['model_merge_save_path'])
|
||||
dump_path = global_models_dir() / "merged_models"
|
||||
if model_merge_info["model_merge_save_path"] is not None:
|
||||
dump_path = Path(model_merge_info["model_merge_save_path"])
|
||||
|
||||
os.makedirs(dump_path, exist_ok=True)
|
||||
dump_path = dump_path / model_merge_info['merged_model_name']
|
||||
dump_path = dump_path / model_merge_info["merged_model_name"]
|
||||
merged_pipe.save_pretrained(dump_path, safe_serialization=1)
|
||||
|
||||
merged_model_config = dict(
|
||||
model_name=model_merge_info['merged_model_name'],
|
||||
model_name=model_merge_info["merged_model_name"],
|
||||
description=f'Merge of models {", ".join(models_to_merge)}',
|
||||
commit_to_conf=opt.conf
|
||||
commit_to_conf=opt.conf,
|
||||
)
|
||||
|
||||
if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None):
|
||||
print(
|
||||
f">> Using configured VAE assigned to {models_to_merge[0]}")
|
||||
if vae := self.generate.model_manager.config[models_to_merge[0]].get(
|
||||
"vae", None
|
||||
):
|
||||
print(f">> Using configured VAE assigned to {models_to_merge[0]}")
|
||||
merged_model_config.update(vae=vae)
|
||||
|
||||
self.generate.model_manager.import_diffuser_model(
|
||||
dump_path, **merged_model_config)
|
||||
dump_path, **merged_model_config
|
||||
)
|
||||
new_model_list = self.generate.model_manager.list_models()
|
||||
|
||||
socketio.emit(
|
||||
"modelsMerged",
|
||||
{"merged_models": models_to_merge,
|
||||
"merged_model_name": model_merge_info['merged_model_name'],
|
||||
"model_list": new_model_list, 'update': True},
|
||||
{
|
||||
"merged_models": models_to_merge,
|
||||
"merged_model_name": model_merge_info["merged_model_name"],
|
||||
"model_list": new_model_list,
|
||||
"update": True,
|
||||
},
|
||||
)
|
||||
print(f">> Models Merged: {models_to_merge}")
|
||||
print(
|
||||
f">> New Model Added: {model_merge_info['merged_model_name']}")
|
||||
print(f">> New Model Added: {model_merge_info['merged_model_name']}")
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("getLoraModels")
|
||||
def get_lora_models():
|
||||
try:
|
||||
lora_path = global_lora_models_dir()
|
||||
loras = []
|
||||
for root, _, files in os.walk(lora_path):
|
||||
models = [
|
||||
Path(root, x)
|
||||
for x in files
|
||||
if Path(x).suffix in [".ckpt", ".pt", ".safetensors"]
|
||||
]
|
||||
loras = loras + models
|
||||
|
||||
found_loras = []
|
||||
for lora in sorted(loras, key=lambda s: s.stem.lower()):
|
||||
location = str(lora.resolve()).replace("\\", "/")
|
||||
found_loras.append({"name": lora.stem, "location": location})
|
||||
socketio.emit("foundLoras", found_loras)
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@socketio.on("getTextualInversionTriggers")
|
||||
def get_ti_triggers():
|
||||
try:
|
||||
local_triggers = self.generate.model.textual_inversion_manager.get_all_trigger_strings()
|
||||
locals = [{'name': x} for x in sorted(local_triggers, key=str.casefold)]
|
||||
concepts = HuggingFaceConceptsLibrary().list_concepts(minimum_likes=5)
|
||||
concepts = [{'name': f'<{x}>'} for x in sorted(concepts, key=str.casefold) if f'<{x}>' not in local_triggers]
|
||||
socketio.emit("foundTextualInversionTriggers", {'local_triggers': locals, 'huggingface_concepts': concepts})
|
||||
except Exception as e:
|
||||
self.handle_exceptions(e)
|
||||
|
||||
@@ -500,7 +567,8 @@ class InvokeAIWebServer:
|
||||
os.remove(thumbnail_path)
|
||||
except Exception as e:
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to delete {f}: {str(e)}"})
|
||||
"error", {"message": f"Unable to delete {f}: {str(e)}"}
|
||||
)
|
||||
pass
|
||||
|
||||
socketio.emit("tempFolderEmptied")
|
||||
@@ -511,8 +579,7 @@ class InvokeAIWebServer:
|
||||
def save_temp_image_to_gallery(url):
|
||||
try:
|
||||
image_path = self.get_image_path_from_url(url)
|
||||
new_path = os.path.join(
|
||||
self.result_path, os.path.basename(image_path))
|
||||
new_path = os.path.join(self.result_path, os.path.basename(image_path))
|
||||
shutil.copy2(image_path, new_path)
|
||||
|
||||
if os.path.splitext(new_path)[1] == ".png":
|
||||
@@ -525,8 +592,7 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(
|
||||
new_path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(new_path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array = [
|
||||
@@ -585,8 +651,7 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(
|
||||
path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array.append(
|
||||
@@ -605,7 +670,8 @@ class InvokeAIWebServer:
|
||||
)
|
||||
except Exception as e:
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"}
|
||||
)
|
||||
pass
|
||||
|
||||
socketio.emit(
|
||||
@@ -655,8 +721,7 @@ class InvokeAIWebServer:
|
||||
(width, height) = pil_image.size
|
||||
|
||||
thumbnail_path = save_thumbnail(
|
||||
pil_image, os.path.basename(
|
||||
path), self.thumbnail_image_path
|
||||
pil_image, os.path.basename(path), self.thumbnail_image_path
|
||||
)
|
||||
|
||||
image_array.append(
|
||||
@@ -676,7 +741,8 @@ class InvokeAIWebServer:
|
||||
except Exception as e:
|
||||
print(f">> Unable to load {path}")
|
||||
socketio.emit(
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"})
|
||||
"error", {"message": f"Unable to load {path}: {str(e)}"}
|
||||
)
|
||||
pass
|
||||
|
||||
socketio.emit(
|
||||
@@ -710,10 +776,9 @@ class InvokeAIWebServer:
|
||||
printable_parameters["init_mask"][:64] + "..."
|
||||
)
|
||||
|
||||
print(
|
||||
f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
|
||||
print(f'>> ESRGAN Parameters: {esrgan_parameters}')
|
||||
print(f'>> Facetool Parameters: {facetool_parameters}')
|
||||
print(f"\n>> Image Generation Parameters:\n\n{printable_parameters}\n")
|
||||
print(f">> ESRGAN Parameters: {esrgan_parameters}")
|
||||
print(f">> Facetool Parameters: {facetool_parameters}")
|
||||
|
||||
self.generate_images(
|
||||
generation_parameters,
|
||||
@@ -750,11 +815,9 @@ class InvokeAIWebServer:
|
||||
if postprocessing_parameters["type"] == "esrgan":
|
||||
progress.set_current_status("common.statusUpscalingESRGAN")
|
||||
elif postprocessing_parameters["type"] == "gfpgan":
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesGFPGAN")
|
||||
progress.set_current_status("common.statusRestoringFacesGFPGAN")
|
||||
elif postprocessing_parameters["type"] == "codeformer":
|
||||
progress.set_current_status(
|
||||
"common.statusRestoringFacesCodeFormer")
|
||||
progress.set_current_status("common.statusRestoringFacesCodeFormer")
|
||||
|
||||
socketio.emit("progressUpdate", progress.to_formatted_dict())
|
||||
eventlet.sleep(0)
|
||||
@@ -919,8 +982,7 @@ class InvokeAIWebServer:
  init_img_url = generation_parameters["init_img"]

- original_bounding_box = generation_parameters["bounding_box"].copy(
- )
+ original_bounding_box = generation_parameters["bounding_box"].copy()

  initial_image = dataURL_to_image(
      generation_parameters["init_img"]
@@ -997,8 +1059,9 @@
  elif generation_parameters["generation_mode"] == "img2img":
      init_img_url = generation_parameters["init_img"]
      init_img_path = self.get_image_path_from_url(init_img_url)
-     generation_parameters["init_img"] = Image.open(
-         init_img_path).convert('RGB')
+     generation_parameters["init_img"] = Image.open(init_img_path).convert(
+         "RGB"
+     )

  def image_progress(sample, step):
      if self.canceled.is_set():
@@ -1058,8 +1121,7 @@
  )

  if generation_parameters["progress_latents"]:
-     image = self.generate.sample_to_lowres_estimated_image(
-         sample)
+     image = self.generate.sample_to_lowres_estimated_image(sample)
      (width, height) = image.size
      width *= 8
      height *= 8
@@ -1078,8 +1140,7 @@
      },
  )

- self.socketio.emit(
-     "progressUpdate", progress.to_formatted_dict())
+ self.socketio.emit("progressUpdate", progress.to_formatted_dict())
  eventlet.sleep(0)

  def image_done(image, seed, first_seed, attention_maps_image=None):
@@ -1106,8 +1167,7 @@
  progress.set_current_status("common.statusGenerationComplete")

- self.socketio.emit(
-     "progressUpdate", progress.to_formatted_dict())
+ self.socketio.emit("progressUpdate", progress.to_formatted_dict())
  eventlet.sleep(0)

  all_parameters = generation_parameters
@@ -1118,8 +1178,7 @@
  and all_parameters["variation_amount"] > 0
  ):
      first_seed = first_seed or seed
-     this_variation = [
-         [seed, all_parameters["variation_amount"]]]
+     this_variation = [[seed, all_parameters["variation_amount"]]]
      all_parameters["with_variations"] = (
          prior_variations + this_variation
      )
@@ -1135,14 +1194,13 @@
  if esrgan_parameters:
      progress.set_current_status("common.statusUpscaling")
      progress.set_current_status_has_steps(False)
-     self.socketio.emit(
-         "progressUpdate", progress.to_formatted_dict())
+     self.socketio.emit("progressUpdate", progress.to_formatted_dict())
      eventlet.sleep(0)

      image = self.esrgan.process(
          image=image,
          upsampler_scale=esrgan_parameters["level"],
-         denoise_str=esrgan_parameters['denoise_str'],
+         denoise_str=esrgan_parameters["denoise_str"],
          strength=esrgan_parameters["strength"],
          seed=seed,
      )
@@ -1150,7 +1208,7 @@
  postprocessing = True
  all_parameters["upscale"] = [
      esrgan_parameters["level"],
-     esrgan_parameters['denoise_str'],
+     esrgan_parameters["denoise_str"],
      esrgan_parameters["strength"],
  ]
@@ -1159,15 +1217,14 @@
  if facetool_parameters:
      if facetool_parameters["type"] == "gfpgan":
-         progress.set_current_status(
-             "common.statusRestoringFacesGFPGAN")
+         progress.set_current_status("common.statusRestoringFacesGFPGAN")
      elif facetool_parameters["type"] == "codeformer":
          progress.set_current_status(
-             "common.statusRestoringFacesCodeFormer")
+             "common.statusRestoringFacesCodeFormer"
+         )

      progress.set_current_status_has_steps(False)
-     self.socketio.emit(
-         "progressUpdate", progress.to_formatted_dict())
+     self.socketio.emit("progressUpdate", progress.to_formatted_dict())
      eventlet.sleep(0)

      if facetool_parameters["type"] == "gfpgan":
@@ -1197,8 +1254,7 @@
  all_parameters["facetool_type"] = facetool_parameters["type"]

  progress.set_current_status("common.statusSavingImage")
- self.socketio.emit(
-     "progressUpdate", progress.to_formatted_dict())
+ self.socketio.emit("progressUpdate", progress.to_formatted_dict())
  eventlet.sleep(0)

  # restore the stashed URLS and discard the paths, we are about to send the result to client
@@ -1215,8 +1271,7 @@
  if generation_parameters["generation_mode"] == "unifiedCanvas":
      all_parameters["bounding_box"] = original_bounding_box

- metadata = self.parameters_to_generated_image_metadata(
-     all_parameters)
+ metadata = self.parameters_to_generated_image_metadata(all_parameters)

  command = parameters_to_command(all_parameters)
@@ -1246,22 +1301,27 @@
  if progress.total_iterations > progress.current_iteration:
      progress.set_current_step(1)
-     progress.set_current_status(
-         "common.statusIterationComplete")
+     progress.set_current_status("common.statusIterationComplete")
      progress.set_current_status_has_steps(False)
  else:
      progress.mark_complete()

- self.socketio.emit(
-     "progressUpdate", progress.to_formatted_dict())
+ self.socketio.emit("progressUpdate", progress.to_formatted_dict())
  eventlet.sleep(0)

- parsed_prompt, _ = get_prompt_structure(
-     generation_parameters["prompt"])
- tokens = None if type(parsed_prompt) is Blend else \
-     get_tokens_for_prompt_object(get_tokenizer(self.generate.model), parsed_prompt)
- attention_maps_image_base64_url = None if attention_maps_image is None \
+ parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
+ tokens = (
+     None
+     if type(parsed_prompt) is Blend
+     else get_tokens_for_prompt_object(
+         get_tokenizer(self.generate.model), parsed_prompt
+     )
+ )
+ attention_maps_image_base64_url = (
+     None
+     if attention_maps_image is None
+     else image_to_dataURL(attention_maps_image)
+ )

  self.socketio.emit(
      "generationResult",
@@ -1293,7 +1353,7 @@
  self.generate.prompt2image(
      **generation_parameters,
      step_callback=diffusers_step_callback_adapter,
-     image_callback=image_done
+     image_callback=image_done,
  )

  except KeyboardInterrupt:
@@ -1416,8 +1476,7 @@
  self, parameters, original_image_path
  ):
      try:
-         current_metadata = retrieve_metadata(
-             original_image_path)["sd-metadata"]
+         current_metadata = retrieve_metadata(original_image_path)["sd-metadata"]
          postprocessing_metadata = {}

          """
@@ -1457,8 +1516,7 @@
      postprocessing_metadata
  )
  else:
-     current_metadata["image"]["postprocessing"] = [
-         postprocessing_metadata]
+     current_metadata["image"]["postprocessing"] = [postprocessing_metadata]

  return current_metadata
@@ -1554,8 +1612,7 @@
  )
  elif "thumbnails" in url:
      return os.path.abspath(
-         os.path.join(self.thumbnail_image_path,
-                      os.path.basename(url))
+         os.path.join(self.thumbnail_image_path, os.path.basename(url))
      )
  else:
      return os.path.abspath(
@@ -1601,7 +1658,7 @@
  except Exception as e:
      self.handle_exceptions(e)

- def handle_exceptions(self, exception, emit_key: str = 'error'):
+ def handle_exceptions(self, exception, emit_key: str = "error"):
      self.socketio.emit(emit_key, {"message": (str(exception))})
      print("\n")
      traceback.print_exc()
File diff suppressed because one or more lines are too long:
  1    invokeai/frontend/dist/assets/index-2ab0eb58.css   (vendored, Normal file)
  624  invokeai/frontend/dist/assets/index-c09cf9ca.js    (vendored)
  603  invokeai/frontend/dist/assets/index-f56b39bc.js    (vendored, Normal file)

4  invokeai/frontend/dist/index.html  (vendored)
@@ -5,8 +5,8 @@
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>InvokeAI - A Stable Diffusion Toolkit</title>
  <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
- <script type="module" crossorigin src="./assets/index-c09cf9ca.js"></script>
- <link rel="stylesheet" href="./assets/index-14cb2922.css">
+ <script type="module" crossorigin src="./assets/index-f56b39bc.js"></script>
+ <link rel="stylesheet" href="./assets/index-2ab0eb58.css">
  </head>

  <body>
8  invokeai/frontend/dist/locales/en.json  (vendored)

@@ -327,6 +327,13 @@
  "addModel": "Add Model",
  "updateModel": "Update Model",
  "availableModels": "Available Models",
+ "addLora": "Add Lora",
+ "clearLoras": "Clear Loras",
+ "noLoraModels": "No Loras Found",
+ "addTextualInversionTrigger": "Add Textual Inversion",
+ "addTIToNegative": "Add To Negative",
+ "clearTextualInversions": "Clear Textual Inversions",
+ "noTextualInversionTriggers": "No Textual Inversions Found",
  "search": "Search",
  "load": "Load",
  "active": "active",
@@ -483,6 +490,7 @@
  "useCanvasBeta": "Use Canvas Beta Layout",
  "enableImageDebugging": "Enable Image Debugging",
  "useSlidersForAll": "Use Sliders For All Options",
+ "showHuggingFaceConcepts": "Show Textual Inversions from HF Concepts Library",
  "resetWebUI": "Reset Web UI",
  "resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
  "resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
@@ -15,8 +15,8 @@
  "postinstall": "patch-package"
  },
  "dependencies": {
-   "@chakra-ui/icons": "^2.0.17",
-   "@chakra-ui/react": "^2.5.1",
+   "@chakra-ui/icons": "^2.0.18",
+   "@chakra-ui/react": "^2.5.5",
    "@emotion/cache": "^11.10.5",
    "@emotion/react": "^11.10.6",
    "@emotion/styled": "^11.10.6",
@@ -52,6 +52,7 @@
    "redux-persist": "^6.0.0",
    "socket.io": "^4.6.0",
    "socket.io-client": "^4.6.0",
    "typescript": "^5.0.3",
    "use-image": "^1.1.0",
    "uuid": "^9.0.0",
    "yarn": "^1.22.19"
@@ -61,8 +62,8 @@
    "@types/react": "^18.0.28",
    "@types/react-dom": "^18.0.11",
    "@types/react-transition-group": "^4.4.5",
-   "@typescript-eslint/eslint-plugin": "^5.52.0",
-   "@typescript-eslint/parser": "^5.52.0",
+   "@typescript-eslint/eslint-plugin": "^5.57.0",
+   "@typescript-eslint/parser": "^5.57.0",
    "babel-plugin-transform-imports": "^2.0.0",
    "eslint": "^8.34.0",
    "eslint-config-prettier": "^8.6.0",
@@ -327,6 +327,13 @@
  "addModel": "Add Model",
  "updateModel": "Update Model",
  "availableModels": "Available Models",
+ "addLora": "Add Lora",
+ "clearLoras": "Clear Loras",
+ "noLoraModels": "No Loras Found",
+ "addTextualInversionTrigger": "Add Textual Inversion",
+ "addTIToNegative": "Add To Negative",
+ "clearTextualInversions": "Clear Textual Inversions",
+ "noTextualInversionTriggers": "No Textual Inversions Found",
  "search": "Search",
  "load": "Load",
  "active": "active",
@@ -483,6 +490,7 @@
  "useCanvasBeta": "Use Canvas Beta Layout",
  "enableImageDebugging": "Enable Image Debugging",
  "useSlidersForAll": "Use Sliders For All Options",
+ "showHuggingFaceConcepts": "Show Textual Inversions from HF Concepts Library",
  "resetWebUI": "Reset Web UI",
  "resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
  "resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
17  invokeai/frontend/src/app/invokeai.d.ts  (vendored)

@@ -271,6 +271,23 @@ export declare type FoundModelResponse = {
  found_models: FoundModel[];
  };

+ export declare type FoundLora = {
+   name: string;
+   location: string;
+ };
+
+ export declare type FoundTextualInversionTriggers = {
+   name: string;
+   location: string;
+ };
+
+ export declare type FoundLorasRsponse = FoundLora[];
+
+ export declare type FoundTextualInversionTriggersResponse = {
+   local_triggers: FoundTextualInversionTriggers[];
+   huggingface_concepts: FoundTextualInversionTriggers[];
+ };
+
  export declare type SystemStatusResponse = SystemStatus;

  export declare type SystemConfigResponse = SystemConfig;
@@ -52,6 +52,12 @@ export const requestModelChange = createAction<string>(
  'socketio/requestModelChange'
  );

+ export const getLoraModels = createAction<undefined>('socketio/getLoraModels');
+
+ export const getTextualInversionTriggers = createAction<undefined>(
+   'socketio/getTextualInversionTriggers'
+ );
+
  export const saveStagingAreaImageToGallery = createAction<string>(
    'socketio/saveStagingAreaImageToGallery'
  );
@@ -196,6 +196,12 @@ const makeSocketIOEmitters = (
    dispatch(modelChangeRequested());
    socketio.emit('requestModelChange', modelName);
  },
+ emitGetLoraModels: () => {
+   socketio.emit('getLoraModels');
+ },
+ emitGetTextualInversionTriggers: () => {
+   socketio.emit('getTextualInversionTriggers');
+ },
  emitSaveStagingAreaImageToGallery: (url: string) => {
    socketio.emit('requestSaveStagingAreaImageToGallery', url);
  },
@@ -11,6 +11,7 @@ import {
|
||||
errorOccurred,
|
||||
processingCanceled,
|
||||
setCurrentStatus,
|
||||
setFoundLoras,
|
||||
setFoundModels,
|
||||
setIsCancelable,
|
||||
setIsConnected,
|
||||
@@ -19,6 +20,8 @@ import {
|
||||
setSearchFolder,
|
||||
setSystemConfig,
|
||||
setSystemStatus,
|
||||
setFoundLocalTextualInversionTriggers,
|
||||
setFoundHuggingFaceTextualInversionTriggers,
|
||||
} from 'features/system/store/systemSlice';
|
||||
|
||||
import {
|
||||
@@ -34,8 +37,10 @@ import type { RootState } from 'app/store';
|
||||
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||
import {
|
||||
clearInitialImage,
|
||||
setHuggingFaceTextualInversionConcepts,
|
||||
setInfillMethod,
|
||||
setInitialImage,
|
||||
setLocalTextualInversionTriggers,
|
||||
setMaskPath,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import { tabMap } from 'features/ui/store/tabMap';
|
||||
@@ -482,6 +487,37 @@ const makeSocketIOListeners = (
|
||||
})
|
||||
);
|
||||
},
|
||||
onFoundLoras: (data: InvokeAI.FoundLorasRsponse) => {
|
||||
dispatch(setFoundLoras(data));
|
||||
},
|
||||
onFoundTextualInversionTriggers: (
|
||||
data: InvokeAI.FoundTextualInversionTriggersResponse
|
||||
) => {
|
||||
const localTriggers = data.local_triggers;
|
||||
const huggingFaceConcepts = data.huggingface_concepts;
|
||||
|
||||
dispatch(setFoundLocalTextualInversionTriggers(localTriggers));
|
||||
dispatch(
|
||||
setFoundHuggingFaceTextualInversionTriggers(huggingFaceConcepts)
|
||||
);
|
||||
|
||||
// Assign Local TI's
|
||||
const foundLocalTINames: string[] = [];
|
||||
localTriggers.forEach((textualInversion) => {
|
||||
foundLocalTINames.push(textualInversion.name);
|
||||
});
|
||||
dispatch(setLocalTextualInversionTriggers(foundLocalTINames));
|
||||
|
||||
// Assign HuggingFace Concepts
|
||||
const foundHuggingFaceConceptNames: string[] = [];
|
||||
huggingFaceConcepts.forEach((concept) => {
|
||||
foundHuggingFaceConceptNames.push(concept.name);
|
||||
});
|
||||
dispatch(
|
||||
setHuggingFaceTextualInversionConcepts(foundHuggingFaceConceptNames)
|
||||
);
|
||||
},
|
||||
|
||||
onTempFolderEmptied: () => {
|
||||
dispatch(
|
||||
addToast({
|
||||
|
||||
@@ -51,6 +51,8 @@ export const socketioMiddleware = () => {
|
||||
onModelConverted,
|
||||
onModelsMerged,
|
||||
onModelChangeFailed,
|
||||
onFoundLoras,
|
||||
onFoundTextualInversionTriggers,
|
||||
onTempFolderEmptied,
|
||||
} = makeSocketIOListeners(store);
|
||||
|
||||
@@ -69,6 +71,8 @@ export const socketioMiddleware = () => {
|
||||
emitConvertToDiffusers,
|
||||
emitMergeDiffusersModels,
|
||||
emitRequestModelChange,
|
||||
emitGetLoraModels,
|
||||
emitGetTextualInversionTriggers,
|
||||
emitSaveStagingAreaImageToGallery,
|
||||
emitRequestEmptyTempFolder,
|
||||
} = makeSocketIOEmitters(store, socketio);
|
||||
@@ -145,6 +149,17 @@ export const socketioMiddleware = () => {
|
||||
onModelChangeFailed(data);
|
||||
});
|
||||
|
||||
socketio.on('foundLoras', (data: InvokeAI.FoundLorasRsponse) => {
|
||||
onFoundLoras(data);
|
||||
});
|
||||
|
||||
socketio.on(
|
||||
'foundTextualInversionTriggers',
|
||||
(data: InvokeAI.FoundTextualInversionTriggersResponse) => {
|
||||
onFoundTextualInversionTriggers(data);
|
||||
}
|
||||
);
|
||||
|
||||
socketio.on('tempFolderEmptied', () => {
|
||||
onTempFolderEmptied();
|
||||
});
|
||||
@@ -226,6 +241,16 @@ export const socketioMiddleware = () => {
|
||||
break;
|
||||
}
|
||||
|
||||
case 'socketio/getLoraModels': {
|
||||
emitGetLoraModels();
|
||||
break;
|
||||
}
|
||||
|
||||
case 'socketio/getTextualInversionTriggers': {
|
||||
emitGetTextualInversionTriggers();
|
||||
break;
|
||||
}
|
||||
|
||||
case 'socketio/saveStagingAreaImageToGallery': {
|
||||
emitSaveStagingAreaImageToGallery(action.payload);
|
||||
break;
|
||||
|
||||
@@ -7,13 +7,14 @@ import {
|
||||
MenuButtonProps,
|
||||
MenuListProps,
|
||||
MenuItemProps,
|
||||
Text,
|
||||
} from '@chakra-ui/react';
|
||||
import { MouseEventHandler, ReactNode } from 'react';
|
||||
import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';
|
||||
import IAIButton from './IAIButton';
|
||||
import IAIIconButton from './IAIIconButton';
|
||||
|
||||
interface IAIMenuItem {
|
||||
export interface IAIMenuItem {
|
||||
item: ReactNode | string;
|
||||
onClick: MouseEventHandler<HTMLButtonElement> | undefined;
|
||||
}
|
||||
@@ -43,6 +44,7 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
|
||||
|
||||
const renderMenuItems = () => {
|
||||
const menuItemsToRender: ReactNode[] = [];
|
||||
|
||||
menuItems.forEach((menuItem, index) => {
|
||||
menuItemsToRender.push(
|
||||
<MenuItem
|
||||
@@ -82,12 +84,17 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
|
||||
fontSize="1.5rem"
|
||||
{...menuButtonProps}
|
||||
>
|
||||
{menuType === 'regular' && buttonText}
|
||||
{menuType === 'regular' && (
|
||||
<Text fontSize="0.9rem">{buttonText}</Text>
|
||||
)}
|
||||
</MenuButton>
|
||||
<MenuList
|
||||
zIndex={15}
|
||||
padding={0}
|
||||
borderRadius="0.5rem"
|
||||
overflow="scroll"
|
||||
maxWidth={'22.5rem'}
|
||||
maxHeight={500}
|
||||
backgroundColor="var(--background-color-secondary)"
|
||||
color="var(--text-color-secondary)"
|
||||
borderColor="var(--border-color)"
|
||||
|
||||
@@ -34,7 +34,6 @@ export default function MainWidth() {
|
||||
withSliderMarks
|
||||
sliderMarkRightOffset={-8}
|
||||
inputWidth="6.2rem"
|
||||
inputReadOnly
|
||||
sliderNumberInputProps={{ max: 15360 }}
|
||||
/>
|
||||
) : (
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { getLoraModels } from 'app/socketio/actions';
|
||||
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
|
||||
import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
|
||||
import {
|
||||
setClearLoras,
|
||||
setLorasInUse,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import { useEffect } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { MdClear } from 'react-icons/md';
|
||||
|
||||
export default function LoraManager() {
|
||||
const dispatch = useAppDispatch();
|
||||
const foundLoras = useAppSelector((state) => state.system.foundLoras);
|
||||
const lorasInUse = useAppSelector((state) => state.generation.lorasInUse);
|
||||
|
||||
const { t } = useTranslation();
|
||||
|
||||
const handleLora = (lora: string) => {
|
||||
dispatch(setLorasInUse(lora));
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
dispatch(getLoraModels());
|
||||
}, [dispatch]);
|
||||
|
||||
const renderLoraOption = (lora: string) => {
|
||||
const thisloraExists = lorasInUse.includes(lora);
|
||||
const loraExistsStyle = {
|
||||
fontWeight: 'bold',
|
||||
color: 'var(--context-menu-active-item)',
|
||||
};
|
||||
return <Box style={thisloraExists ? loraExistsStyle : {}}>{lora}</Box>;
|
||||
};
|
||||
|
||||
const numOfActiveLoras = () => {
|
||||
const foundLoraNames: string[] = [];
|
||||
foundLoras?.forEach((lora) => {
|
||||
foundLoraNames.push(lora.name);
|
||||
});
|
||||
return foundLoraNames.filter((lora) => lorasInUse.includes(lora)).length;
|
||||
};
|
||||
|
||||
const makeLoraItems = () => {
|
||||
const lorasFound: IAIMenuItem[] = [];
|
||||
foundLoras?.forEach((lora) => {
|
||||
if (lora.name !== ' ') {
|
||||
const newLoraItem: IAIMenuItem = {
|
||||
item: renderLoraOption(lora.name),
|
||||
onClick: () => handleLora(lora.name),
|
||||
};
|
||||
lorasFound.push(newLoraItem);
|
||||
}
|
||||
});
|
||||
return lorasFound;
|
||||
};
|
||||
|
||||
return foundLoras && foundLoras?.length > 0 ? (
|
||||
<Flex columnGap={2}>
|
||||
<IAISimpleMenu
|
||||
menuItems={makeLoraItems()}
|
||||
menuType="regular"
|
||||
buttonText={`${t('modelManager.addLora')} (${numOfActiveLoras()})`}
|
||||
menuButtonProps={{ width: '100%', padding: '0 1rem' }}
|
||||
/>
|
||||
<IAIIconButton
|
||||
icon={<MdClear />}
|
||||
tooltip={t('modelManager.clearLoras')}
|
||||
aria-label={t('modelManager.clearLoras')}
|
||||
onClick={() => dispatch(setClearLoras())}
|
||||
/>
|
||||
</Flex>
|
||||
) : (
|
||||
<Box
|
||||
background="var(--btn-base-color)"
|
||||
padding={2}
|
||||
textAlign="center"
|
||||
borderRadius={4}
|
||||
fontWeight="bold"
|
||||
>
|
||||
{t('modelManager.noLoraModels')}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
import { Flex } from '@chakra-ui/react';
|
||||
import LoraManager from './LoraManager/LoraManager';
|
||||
import TextualInversionManager from './TextualInversionManager/TextualInversionManager';
|
||||
|
||||
export default function PromptExtras() {
|
||||
return (
|
||||
<Flex flexDir="column" rowGap={2}>
|
||||
<LoraManager />
|
||||
<TextualInversionManager />
|
||||
</Flex>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,163 @@
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import { getTextualInversionTriggers } from 'app/socketio/actions';
|
||||
import { RootState } from 'app/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
|
||||
import IAIIconButton from 'common/components/IAIIconButton';
|
||||
import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
|
||||
import {
|
||||
setAddTIToNegative,
|
||||
setClearTextualInversions,
|
||||
setTextualInversionsInUse,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import { useEffect } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { MdArrowDownward, MdClear } from 'react-icons/md';
|
||||
|
||||
export default function TextualInversionManager() {
|
||||
const dispatch = useAppDispatch();
|
||||
const textualInversionsInUse = useAppSelector(
|
||||
(state: RootState) => state.generation.textualInversionsInUse
|
||||
);
|
||||
|
||||
const negativeTextualInversionsInUse = useAppSelector(
|
||||
(state: RootState) => state.generation.negativeTextualInversionsInUse
|
||||
);
|
||||
|
||||
const foundLocalTextualInversionTriggers = useAppSelector(
|
||||
(state) => state.system.foundLocalTextualInversionTriggers
|
||||
);
|
||||
const foundHuggingFaceTextualInversionTriggers = useAppSelector(
|
||||
(state) => state.system.foundHuggingFaceTextualInversionTriggers
|
||||
);
|
||||
|
||||
const localTextualInversionTriggers = useAppSelector(
|
||||
(state) => state.generation.localTextualInversionTriggers
|
||||
);
|
||||
|
||||
const huggingFaceTextualInversionConcepts = useAppSelector(
|
||||
(state) => state.generation.huggingFaceTextualInversionConcepts
|
||||
);
|
||||
|
||||
const shouldShowHuggingFaceConcepts = useAppSelector(
|
||||
(state) => state.ui.shouldShowHuggingFaceConcepts
|
||||
);
|
||||
|
||||
const addTIToNegative = useAppSelector(
|
||||
(state) => state.generation.addTIToNegative
|
||||
);
|
||||
|
||||
const { t } = useTranslation();
|
||||
|
||||
useEffect(() => {
|
||||
dispatch(getTextualInversionTriggers());
|
||||
}, [dispatch]);
|
||||
|
||||
const handleTextualInversion = (textual_inversion: string) => {
|
||||
dispatch(setTextualInversionsInUse(textual_inversion));
|
||||
};
|
||||
|
||||
const TIPip = ({ color }: { color: string }) => {
|
||||
return (
|
||||
<Box width={2} height={2} borderRadius={9999} backgroundColor={color}>
|
||||
{' '}
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
const renderTextualInversionOption = (textual_inversion: string) => {
|
||||
return (
|
||||
<Flex alignItems="center" columnGap={1}>
|
||||
{textual_inversion}
|
||||
{textualInversionsInUse.includes(textual_inversion) && (
|
||||
<TIPip color="var(--context-menu-active-item)" />
|
||||
)}
|
||||
{negativeTextualInversionsInUse.includes(textual_inversion) && (
|
||||
<TIPip color="var(--status-bad-color)" />
|
||||
)}
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
const numOfActiveTextualInversions = () => {
|
||||
const allTextualInversions = localTextualInversionTriggers.concat(
|
||||
huggingFaceTextualInversionConcepts
|
||||
);
|
||||
return allTextualInversions.filter(
|
||||
(ti) =>
|
||||
textualInversionsInUse.includes(ti) ||
|
||||
negativeTextualInversionsInUse.includes(ti)
|
||||
).length;
|
||||
};
|
||||
|
||||
const makeTextualInversionItems = () => {
|
||||
const textualInversionsFound: IAIMenuItem[] = [];
|
||||
foundLocalTextualInversionTriggers?.forEach((textualInversion) => {
|
||||
if (textualInversion.name !== ' ') {
|
||||
const newTextualInversionItem: IAIMenuItem = {
|
||||
item: renderTextualInversionOption(textualInversion.name),
|
||||
onClick: () => handleTextualInversion(textualInversion.name),
|
||||
};
|
||||
textualInversionsFound.push(newTextualInversionItem);
|
||||
}
|
||||
});
|
||||
|
||||
if (shouldShowHuggingFaceConcepts) {
|
||||
foundHuggingFaceTextualInversionTriggers?.forEach((textualInversion) => {
|
||||
if (textualInversion.name !== ' ') {
|
||||
const newTextualInversionItem: IAIMenuItem = {
|
||||
item: renderTextualInversionOption(textualInversion.name),
|
||||
onClick: () => handleTextualInversion(textualInversion.name),
|
||||
};
|
||||
textualInversionsFound.push(newTextualInversionItem);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return textualInversionsFound;
|
||||
};
|
||||
|
||||
return foundLocalTextualInversionTriggers &&
|
||||
(foundLocalTextualInversionTriggers?.length > 0 ||
|
||||
(foundHuggingFaceTextualInversionTriggers &&
|
||||
foundHuggingFaceTextualInversionTriggers?.length > 0 &&
|
||||
shouldShowHuggingFaceConcepts)) ? (
|
||||
<Flex columnGap={2}>
|
||||
<IAISimpleMenu
|
||||
menuItems={makeTextualInversionItems()}
|
||||
menuType="regular"
|
||||
buttonText={`${t(
|
||||
'modelManager.addTextualInversionTrigger'
|
||||
)} (${numOfActiveTextualInversions()})`}
|
||||
menuButtonProps={{
|
||||
width: '100%',
|
||||
padding: '0 1rem',
|
||||
}}
|
||||
/>
|
||||
<IAIIconButton
|
||||
icon={<MdArrowDownward />}
|
||||
style={{
|
||||
backgroundColor: addTIToNegative ? 'var(--btn-delete-image)' : '',
|
||||
}}
|
||||
tooltip={t('modelManager.addTIToNegative')}
|
||||
aria-label={t('modelManager.addTIToNegative')}
|
||||
onClick={() => dispatch(setAddTIToNegative(!addTIToNegative))}
|
||||
/>
|
||||
<IAIIconButton
|
||||
icon={<MdClear />}
|
||||
tooltip={t('modelManager.clearTextualInversions')}
|
||||
aria-label={t('modelManager.clearTextualInversions')}
|
||||
onClick={() => dispatch(setClearTextualInversions())}
|
||||
/>
|
||||
</Flex>
|
||||
) : (
|
||||
<Box
|
||||
background="var(--btn-base-color)"
|
||||
padding={2}
|
||||
textAlign="center"
|
||||
borderRadius={4}
|
||||
fontWeight="bold"
|
||||
>
|
||||
{t('modelManager.noTextualInversionTriggers')}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -1,24 +1,43 @@
|
||||
import { FormControl, Textarea } from '@chakra-ui/react';
|
||||
import type { RootState } from 'app/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
|
||||
import { setNegativePrompt } from 'features/parameters/store/generationSlice';
|
||||
import {
|
||||
handlePromptCheckers,
|
||||
setNegativePrompt,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { ChangeEvent, useState } from 'react';
|
||||
|
||||
const NegativePromptInput = () => {
|
||||
const negativePrompt = useAppSelector(
|
||||
(state: RootState) => state.generation.negativePrompt
|
||||
);
|
||||
|
||||
const [promptTimer, setPromptTimer] = useState<number | undefined>(undefined);
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
const { t } = useTranslation();
|
||||
|
||||
const handleNegativeChangePrompt = (e: ChangeEvent<HTMLTextAreaElement>) => {
|
||||
dispatch(setNegativePrompt(e.target.value));
|
||||
|
||||
// Debounce Prompt UI Checking
|
||||
clearTimeout(promptTimer);
|
||||
const newPromptTimer = window.setTimeout(() => {
|
||||
dispatch(
|
||||
handlePromptCheckers({ prompt: e.target.value, toNegative: true })
|
||||
);
|
||||
}, 500);
|
||||
setPromptTimer(newPromptTimer);
|
||||
};
|
||||
|
||||
return (
|
||||
<FormControl>
|
||||
<Textarea
|
||||
id="negativePrompt"
|
||||
name="negativePrompt"
|
||||
value={negativePrompt}
|
||||
onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
|
||||
onChange={handleNegativeChangePrompt}
|
||||
background="var(--prompt-bg-color)"
|
||||
placeholder={t('parameters.negativePrompts')}
|
||||
_placeholder={{ fontSize: '0.8rem' }}
|
||||
|
||||
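The handleNegativeChangePrompt handler above debounces the prompt checkers so they run only after typing pauses. A rough standalone sketch of that pattern (illustrative only; the names here are hypothetical and not part of the diff):

// Reset the timer on every keystroke; run the expensive check 500 ms after typing stops.
let promptTimer: number | undefined;

function onPromptChanged(value: string, runCheck: (v: string) => void) {
  window.clearTimeout(promptTimer);
  promptTimer = window.setTimeout(() => runCheck(value), 500);
}

// Usage: onPromptChanged(e.target.value, (v) => checkPrompt(v));  // checkPrompt is a stand-in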
@@ -2,12 +2,13 @@ import { FormControl, Textarea } from '@chakra-ui/react';
|
||||
import { generateImage } from 'app/socketio/actions';
|
||||
import { RootState } from 'app/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
|
||||
import { ChangeEvent, KeyboardEvent, useRef } from 'react';
|
||||
import { ChangeEvent, KeyboardEvent, useRef, useState } from 'react';
|
||||
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { readinessSelector } from 'app/selectors/readinessSelector';
|
||||
import {
|
||||
GenerationState,
|
||||
handlePromptCheckers,
|
||||
setPrompt,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
@@ -40,11 +41,21 @@ const PromptInput = () => {
|
||||
const { isReady } = useAppSelector(readinessSelector);
|
||||
|
||||
const promptRef = useRef<HTMLTextAreaElement>(null);
|
||||
const [promptTimer, setPromptTimer] = useState<number | undefined>(undefined);
|
||||
|
||||
const { t } = useTranslation();
|
||||
|
||||
const handleChangePrompt = (e: ChangeEvent<HTMLTextAreaElement>) => {
|
||||
dispatch(setPrompt(e.target.value));
|
||||
|
||||
// Debounce Prompt UI Checking
|
||||
clearTimeout(promptTimer);
|
||||
const newPromptTimer = window.setTimeout(() => {
|
||||
dispatch(
|
||||
handlePromptCheckers({ prompt: e.target.value, toNegative: false })
|
||||
);
|
||||
}, 500);
|
||||
setPromptTimer(newPromptTimer);
|
||||
};
|
||||
|
||||
useHotkeys(
|
||||
|
||||
@@ -3,7 +3,11 @@ import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
|
||||
import * as InvokeAI from 'app/invokeai';
|
||||
import promptToString from 'common/util/promptToString';
|
||||
import { useAppDispatch } from 'app/storeHooks';
|
||||
import { setNegativePrompt, setPrompt } from '../store/generationSlice';
|
||||
import {
|
||||
handlePromptCheckers,
|
||||
setNegativePrompt,
|
||||
setPrompt,
|
||||
} from '../store/generationSlice';
|
||||
|
||||
// TECHDEBT: We have two metadata prompt formats and need to handle recalling either of them.
|
||||
// This hook provides a function to do that.
|
||||
@@ -20,6 +24,10 @@ const useSetBothPrompts = () => {
|
||||
|
||||
dispatch(setPrompt(prompt));
|
||||
dispatch(setNegativePrompt(negativePrompt));
|
||||
dispatch(handlePromptCheckers({ prompt: prompt, toNegative: false }));
|
||||
dispatch(
|
||||
handlePromptCheckers({ prompt: negativePrompt, toNegative: true })
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { PayloadAction } from '@reduxjs/toolkit';
|
||||
import { PayloadAction } from '@reduxjs/toolkit';
|
||||
import { createSlice } from '@reduxjs/toolkit';
|
||||
import * as InvokeAI from 'app/invokeai';
|
||||
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
|
||||
@@ -17,6 +17,12 @@ export interface GenerationState {
|
||||
perlin: number;
|
||||
prompt: string;
|
||||
negativePrompt: string;
|
||||
lorasInUse: string[];
|
||||
huggingFaceTextualInversionConcepts: string[];
|
||||
localTextualInversionTriggers: string[];
|
||||
textualInversionsInUse: string[];
|
||||
negativeTextualInversionsInUse: string[];
|
||||
addTIToNegative: boolean;
|
||||
sampler: string;
|
||||
seamBlur: number;
|
||||
seamless: boolean;
|
||||
@@ -48,6 +54,12 @@ const initialGenerationState: GenerationState = {
|
||||
perlin: 0,
|
||||
prompt: '',
|
||||
negativePrompt: '',
|
||||
lorasInUse: [],
|
||||
huggingFaceTextualInversionConcepts: [],
|
||||
localTextualInversionTriggers: [],
|
||||
textualInversionsInUse: [],
|
||||
negativeTextualInversionsInUse: [],
|
||||
addTIToNegative: false,
|
||||
sampler: 'k_lms',
|
||||
seamBlur: 16,
|
||||
seamless: false,
|
||||
@@ -71,12 +83,99 @@ const initialGenerationState: GenerationState = {
|
||||
|
||||
const initialState: GenerationState = initialGenerationState;
|
||||
|
||||
const loraExists = (state: GenerationState, lora: string) => {
|
||||
const loraRegex = new RegExp(`withLora\\(${lora},?\\s*([^\\)]+)?\\)`);
|
||||
if (state.prompt.match(loraRegex)) return true;
|
||||
return false;
|
||||
};
|
||||
|
||||
const getTIRegex = (textualInversion: string) => {
|
||||
if (textualInversion.includes('<' || '>')) {
|
||||
return new RegExp(`${textualInversion}`);
|
||||
} else {
|
||||
return new RegExp(`\\b${textualInversion}\\b`);
|
||||
}
|
||||
};
|
||||
|
||||
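A short sketch of how the getTIRegex helper above behaves (assuming it is in scope; the sample trigger strings are made up): bracketed triggers are matched literally, while bare trigger words get word boundaries so they do not match inside longer words.

// Bracket-style trigger: matched as-is anywhere in the prompt.
const bracketed = getTIRegex('<ghibli-style>');
bracketed.test('a castle, <ghibli-style>'); // true

// Bare word trigger: \b boundaries keep it from matching inside other words.
const bare = getTIRegex('cat');
bare.test('a cat on a mat');      // true
bare.test('a catalog of styles'); // false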
const textualInversionExists = (
|
||||
state: GenerationState,
|
||||
textualInversion: string
|
||||
) => {
|
||||
const textualInversionRegex = getTIRegex(textualInversion);
|
||||
|
||||
if (!state.addTIToNegative) {
|
||||
if (state.prompt.match(textualInversionRegex)) return true;
|
||||
} else {
|
||||
if (state.negativePrompt.match(textualInversionRegex)) return true;
|
||||
}
|
||||
return false;
|
||||
};
|
||||
|
||||
const handleTypedTICheck = (
|
||||
state: GenerationState,
|
||||
newPrompt: string,
|
||||
toNegative: boolean
|
||||
) => {
|
||||
let textualInversionsInUse = !toNegative
|
||||
? [...state.textualInversionsInUse]
|
||||
: [...state.negativeTextualInversionsInUse]; // Get Words In Prompt
|
||||
|
||||
const textualInversionRegex = /([\w<>!@%&*_-]+)/g; // Scan For Each Word
|
||||
|
||||
const textualInversionMatches = [
|
||||
...newPrompt.matchAll(textualInversionRegex),
|
||||
]; // Match All Words
|
||||
|
||||
if (textualInversionMatches.length > 0) {
|
||||
textualInversionsInUse = []; // Reset Textual Inversions In Use
|
||||
|
||||
textualInversionMatches.forEach((textualInversionMatch) => {
|
||||
const textualInversionName = textualInversionMatch[0];
|
||||
if (
|
||||
(!textualInversionsInUse.includes(textualInversionName) &&
|
||||
state.localTextualInversionTriggers.includes(textualInversionName)) ||
|
||||
state.huggingFaceTextualInversionConcepts.includes(textualInversionName)
|
||||
) {
|
||||
textualInversionsInUse.push(textualInversionName); // Add Textual Inversions In Prompt
|
||||
}
|
||||
});
|
||||
} else {
|
||||
textualInversionsInUse = []; // If No Matches, Remove Textual Inversions In Use
|
||||
}
|
||||
|
||||
if (!toNegative) {
|
||||
state.textualInversionsInUse = textualInversionsInUse;
|
||||
} else {
|
||||
state.negativeTextualInversionsInUse = textualInversionsInUse;
|
||||
}
|
||||
};
|
||||
|
||||
const handleTypedLoraCheck = (state: GenerationState, newPrompt: string) => {
|
||||
let lorasInUse = [...state.lorasInUse]; // Get Loras In Prompt
|
||||
|
||||
const loraRegex = /withLora\(([^\\)]+)\)/g; // Scan For Lora Syntax
|
||||
const loraMatches = [...newPrompt.matchAll(loraRegex)]; // Match All Lora Syntaxes
|
||||
|
||||
if (loraMatches.length > 0) {
|
||||
lorasInUse = []; // Reset Loras In Use
|
||||
loraMatches.forEach((loraMatch) => {
|
||||
const loraName = loraMatch[1].split(',')[0];
|
||||
if (!lorasInUse.includes(loraName)) lorasInUse.push(loraName); // Add Loras In Prompt
|
||||
});
|
||||
} else {
|
||||
lorasInUse = []; // If No Matches, Remove Loras In Use
|
||||
}
|
||||
|
||||
state.lorasInUse = lorasInUse;
|
||||
};
|
||||
|
||||
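A minimal standalone sketch of what the withLora(...) matching in handleTypedLoraCheck above does with a typed prompt (the prompt text and lora names are made up for illustration):

const loraSyntaxRegex = /withLora\(([^)]+)\)/g;
const typedPrompt = 'a portrait withLora(analogFilm,0.75) withLora(detailTweaker)';

const found: string[] = [];
for (const match of typedPrompt.matchAll(loraSyntaxRegex)) {
  // match[1] is the text inside the parentheses, e.g. 'analogFilm,0.75';
  // the lora name is whatever precedes the first comma.
  const name = match[1].split(',')[0];
  if (!found.includes(name)) found.push(name);
}
// found -> ['analogFilm', 'detailTweaker']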
export const generationSlice = createSlice({
|
||||
name: 'generation',
|
||||
initialState,
|
||||
reducers: {
|
||||
setPrompt: (state, action: PayloadAction<string | InvokeAI.Prompt>) => {
|
||||
const newPrompt = action.payload;
|
||||
|
||||
if (typeof newPrompt === 'string') {
|
||||
state.prompt = newPrompt;
|
||||
} else {
|
||||
@@ -94,6 +193,136 @@ export const generationSlice = createSlice({
|
||||
state.negativePrompt = promptToString(newPrompt);
|
||||
}
|
||||
},
|
||||
handlePromptCheckers: (
|
||||
state,
|
||||
action: PayloadAction<{
|
||||
prompt: string | InvokeAI.Prompt;
|
||||
toNegative: boolean;
|
||||
}>
|
||||
) => {
|
||||
const newPrompt = action.payload.prompt;
|
||||
|
||||
if (typeof newPrompt === 'string') {
|
||||
if (!action.payload.toNegative) handleTypedLoraCheck(state, newPrompt);
|
||||
handleTypedTICheck(state, newPrompt, action.payload.toNegative);
|
||||
}
|
||||
},
|
||||
setLorasInUse: (state, action: PayloadAction<string>) => {
|
||||
const newLora = action.payload;
|
||||
const loras = [...state.lorasInUse];
|
||||
|
||||
if (loraExists(state, newLora)) {
|
||||
const loraRegex = new RegExp(
|
||||
`withLora\\(${newLora},?\\s*([^\\)]+)?\\)`,
|
||||
'g'
|
||||
);
|
||||
const newPrompt = state.prompt.replaceAll(loraRegex, '');
|
||||
state.prompt = newPrompt.trim();
|
||||
|
||||
if (loras.includes(newLora)) {
|
||||
const newLoraIndex = loras.indexOf(newLora);
|
||||
if (newLoraIndex > -1) loras.splice(newLoraIndex, 1);
|
||||
}
|
||||
} else {
|
||||
state.prompt = `${state.prompt.trim()} withLora(${newLora},0.75)`;
|
||||
if (!loras.includes(newLora)) loras.push(newLora);
|
||||
}
|
||||
state.lorasInUse = loras;
|
||||
},
|
||||
setClearLoras: (state) => {
|
||||
const lorasInUse = [...state.lorasInUse];
|
||||
|
||||
lorasInUse.forEach((lora) => {
|
||||
const loraRegex = new RegExp(
|
||||
`withLora\\(${lora},?\\s*([^\\)]+)?\\)`,
|
||||
'g'
|
||||
);
|
||||
const newPrompt = state.prompt.replaceAll(loraRegex, '');
|
||||
state.prompt = newPrompt.trim();
|
||||
});
|
||||
|
||||
state.lorasInUse = [];
|
||||
},
|
||||
setTextualInversionsInUse: (state, action: PayloadAction<string>) => {
|
||||
const newTextualInversion = action.payload;
|
||||
|
||||
const textualInversions = [...state.textualInversionsInUse];
|
||||
const negativeTextualInversions = [
|
||||
...state.negativeTextualInversionsInUse,
|
||||
];
|
||||
|
||||
if (textualInversionExists(state, newTextualInversion)) {
|
||||
const textualInversionRegex = getTIRegex(newTextualInversion);
|
||||
|
||||
if (!state.addTIToNegative) {
|
||||
const newPrompt = state.prompt.replace(textualInversionRegex, '');
|
||||
state.prompt = newPrompt.trim();
|
||||
|
||||
const newTIIndex = textualInversions.indexOf(newTextualInversion);
|
||||
if (newTIIndex > -1) textualInversions.splice(newTIIndex, 1);
|
||||
} else {
|
||||
const newPrompt = state.negativePrompt.replace(
|
||||
textualInversionRegex,
|
||||
''
|
||||
);
|
||||
state.negativePrompt = newPrompt.trim();
|
||||
|
||||
const newTIIndex =
|
||||
negativeTextualInversions.indexOf(newTextualInversion);
|
||||
if (newTIIndex > -1) negativeTextualInversions.splice(newTIIndex, 1);
|
||||
}
|
||||
} else {
|
||||
if (!state.addTIToNegative) {
|
||||
state.prompt = `${state.prompt.trim()} ${newTextualInversion}`;
|
||||
textualInversions.push(newTextualInversion);
|
||||
} else {
|
||||
state.negativePrompt = `${state.negativePrompt.trim()} ${newTextualInversion}`;
|
||||
negativeTextualInversions.push(newTextualInversion);
|
||||
}
|
||||
}
|
||||
|
||||
state.textualInversionsInUse = textualInversions;
|
||||
state.negativeTextualInversionsInUse = negativeTextualInversions;
|
||||
},
|
||||
setClearTextualInversions: (state) => {
|
||||
const textualInversions = [...state.textualInversionsInUse];
|
||||
const negativeTextualInversions = [
|
||||
...state.negativeTextualInversionsInUse,
|
||||
];
|
||||
|
||||
textualInversions.forEach((ti) => {
|
||||
const textualInversionRegex = getTIRegex(ti);
|
||||
const newPrompt = state.prompt.replace(textualInversionRegex, '');
|
||||
state.prompt = newPrompt.trim();
|
||||
});
|
||||
|
||||
negativeTextualInversions.forEach((ti) => {
|
||||
const textualInversionRegex = getTIRegex(ti);
|
||||
const newPrompt = state.negativePrompt.replace(
|
||||
textualInversionRegex,
|
||||
''
|
||||
);
|
||||
state.negativePrompt = newPrompt.trim();
|
||||
});
|
||||
|
||||
state.textualInversionsInUse = [];
|
||||
state.negativeTextualInversionsInUse = [];
|
||||
},
|
||||
setAddTIToNegative: (state, action: PayloadAction<boolean>) => {
|
||||
state.addTIToNegative = action.payload;
|
||||
},
|
||||
setLocalTextualInversionTriggers: (
|
||||
state,
|
||||
action: PayloadAction<string[]>
|
||||
) => {
|
||||
state.localTextualInversionTriggers = action.payload;
|
||||
},
|
||||
setHuggingFaceTextualInversionConcepts: (
|
||||
state,
|
||||
action: PayloadAction<string[]>
|
||||
) => {
|
||||
state.huggingFaceTextualInversionConcepts = action.payload;
|
||||
},
|
||||
setIterations: (state, action: PayloadAction<number>) => {
|
||||
state.iterations = action.payload;
|
||||
},
|
||||
@@ -374,6 +603,14 @@ export const {
|
||||
setPerlin,
|
||||
setPrompt,
|
||||
setNegativePrompt,
|
||||
handlePromptCheckers,
|
||||
setLorasInUse,
|
||||
setClearLoras,
|
||||
setHuggingFaceTextualInversionConcepts,
|
||||
setLocalTextualInversionTriggers,
|
||||
setTextualInversionsInUse,
|
||||
setAddTIToNegative,
|
||||
setClearTextualInversions,
|
||||
setSampler,
|
||||
setSeamBlur,
|
||||
setSeamless,
|
||||
|
||||
@@ -31,6 +31,7 @@ import {
|
||||
} from 'features/system/store/systemSlice';
|
||||
import { uiSelector } from 'features/ui/store/uiSelectors';
|
||||
import {
|
||||
setShouldShowHuggingFaceConcepts,
|
||||
setShouldUseCanvasBetaLayout,
|
||||
setShouldUseSliders,
|
||||
} from 'features/ui/store/uiSlice';
|
||||
@@ -52,7 +53,11 @@ const selector = createSelector(
|
||||
enableImageDebugging,
|
||||
} = system;
|
||||
|
||||
const { shouldUseCanvasBetaLayout, shouldUseSliders } = ui;
|
||||
const {
|
||||
shouldUseCanvasBetaLayout,
|
||||
shouldUseSliders,
|
||||
shouldShowHuggingFaceConcepts,
|
||||
} = ui;
|
||||
|
||||
return {
|
||||
shouldDisplayInProgressType,
|
||||
@@ -63,6 +68,7 @@ const selector = createSelector(
|
||||
enableImageDebugging,
|
||||
shouldUseCanvasBetaLayout,
|
||||
shouldUseSliders,
|
||||
shouldShowHuggingFaceConcepts,
|
||||
};
|
||||
},
|
||||
{
|
||||
@@ -107,6 +113,7 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
|
||||
enableImageDebugging,
|
||||
shouldUseCanvasBetaLayout,
|
||||
shouldUseSliders,
|
||||
shouldShowHuggingFaceConcepts,
|
||||
} = useAppSelector(selector);
|
||||
|
||||
/**
|
||||
@@ -206,6 +213,14 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
|
||||
dispatch(setShouldUseSliders(e.target.checked))
|
||||
}
|
||||
/>
|
||||
<IAISwitch
|
||||
styleClass="settings-modal-item"
|
||||
label={t('settings.showHuggingFaceConcepts')}
|
||||
isChecked={shouldShowHuggingFaceConcepts}
|
||||
onChange={(e: ChangeEvent<HTMLInputElement>) =>
|
||||
dispatch(setShouldShowHuggingFaceConcepts(e.target.checked))
|
||||
}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="settings-modal-items">
|
||||
|
||||
@@ -51,6 +51,13 @@ export interface SystemState
|
||||
toastQueue: UseToastOptions[];
|
||||
searchFolder: string | null;
|
||||
foundModels: InvokeAI.FoundModel[] | null;
|
||||
foundLoras: InvokeAI.FoundLora[] | null;
|
||||
foundLocalTextualInversionTriggers:
|
||||
| InvokeAI.FoundTextualInversionTriggers[]
|
||||
| null;
|
||||
foundHuggingFaceTextualInversionTriggers:
|
||||
| InvokeAI.FoundTextualInversionTriggers[]
|
||||
| null;
|
||||
openModel: string | null;
|
||||
cancelOptions: {
|
||||
cancelType: CancelType;
|
||||
@@ -93,6 +100,9 @@ const initialSystemState: SystemState = {
|
||||
toastQueue: [],
|
||||
searchFolder: null,
|
||||
foundModels: null,
|
||||
foundLoras: null,
|
||||
foundLocalTextualInversionTriggers: null,
|
||||
foundHuggingFaceTextualInversionTriggers: null,
|
||||
openModel: null,
|
||||
cancelOptions: {
|
||||
cancelType: 'immediate',
|
||||
@@ -262,6 +272,24 @@ export const systemSlice = createSlice({
|
||||
) => {
|
||||
state.foundModels = action.payload;
|
||||
},
|
||||
setFoundLoras: (
|
||||
state,
|
||||
action: PayloadAction<InvokeAI.FoundLora[] | null>
|
||||
) => {
|
||||
state.foundLoras = action.payload;
|
||||
},
|
||||
setFoundLocalTextualInversionTriggers: (
|
||||
state,
|
||||
action: PayloadAction<InvokeAI.FoundTextualInversionTriggers[] | null>
|
||||
) => {
|
||||
state.foundLocalTextualInversionTriggers = action.payload;
|
||||
},
|
||||
setFoundHuggingFaceTextualInversionTriggers: (
|
||||
state,
|
||||
action: PayloadAction<InvokeAI.FoundTextualInversionTriggers[] | null>
|
||||
) => {
|
||||
state.foundHuggingFaceTextualInversionTriggers = action.payload;
|
||||
},
|
||||
setOpenModel: (state, action: PayloadAction<string | null>) => {
|
||||
state.openModel = action.payload;
|
||||
},
|
||||
@@ -303,6 +331,9 @@ export const {
|
||||
setProcessingIndeterminateTask,
|
||||
setSearchFolder,
|
||||
setFoundModels,
|
||||
setFoundLoras,
|
||||
setFoundLocalTextualInversionTriggers,
|
||||
setFoundHuggingFaceTextualInversionTriggers,
|
||||
setOpenModel,
|
||||
setCancelType,
|
||||
setCancelAfter,
|
||||
|
||||
@@ -18,6 +18,7 @@ import PromptInput from 'features/parameters/components/PromptInput/PromptInput'
|
||||
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import ImageToImageOptions from './ImageToImageOptions';
|
||||
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
|
||||
|
||||
export default function ImageToImagePanel() {
|
||||
const { t } = useTranslation();
|
||||
@@ -63,6 +64,7 @@ export default function ImageToImagePanel() {
|
||||
<Flex flexDir="column" rowGap="0.5rem">
|
||||
<PromptInput />
|
||||
<NegativePromptInput />
|
||||
<PromptExtras />
|
||||
</Flex>
|
||||
<ProcessButtons />
|
||||
<MainSettings />
|
||||
|
||||
@@ -17,6 +17,7 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
|
||||
import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
|
||||
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
|
||||
|
||||
export default function TextToImagePanel() {
|
||||
const { t } = useTranslation();
|
||||
@@ -62,6 +63,7 @@ export default function TextToImagePanel() {
|
||||
<Flex flexDir="column" rowGap="0.5rem">
|
||||
<PromptInput />
|
||||
<NegativePromptInput />
|
||||
<PromptExtras />
|
||||
</Flex>
|
||||
<ProcessButtons />
|
||||
<MainSettings />
|
||||
|
||||
@@ -17,6 +17,7 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
|
||||
import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
|
||||
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
|
||||
|
||||
export default function UnifiedCanvasPanel() {
|
||||
const { t } = useTranslation();
|
||||
@@ -73,6 +74,7 @@ export default function UnifiedCanvasPanel() {
|
||||
<Flex flexDir="column" rowGap="0.5rem">
|
||||
<PromptInput />
|
||||
<NegativePromptInput />
|
||||
<PromptExtras />
|
||||
</Flex>
|
||||
<ProcessButtons />
|
||||
<MainSettings />
|
||||
|
||||
@@ -15,6 +15,7 @@ const initialtabsState: UIState = {
|
||||
shouldUseCanvasBetaLayout: false,
|
||||
shouldShowExistingModelsInSearch: false,
|
||||
shouldUseSliders: false,
|
||||
shouldShowHuggingFaceConcepts: false,
|
||||
addNewModelUIOption: null,
|
||||
};
|
||||
|
||||
@@ -70,6 +71,12 @@ export const uiSlice = createSlice({
|
||||
setShouldUseSliders: (state, action: PayloadAction<boolean>) => {
|
||||
state.shouldUseSliders = action.payload;
|
||||
},
|
||||
setShouldShowHuggingFaceConcepts: (
|
||||
state,
|
||||
action: PayloadAction<boolean>
|
||||
) => {
|
||||
state.shouldShowHuggingFaceConcepts = action.payload;
|
||||
},
|
||||
setAddNewModelUIOption: (state, action: PayloadAction<AddNewModelType>) => {
|
||||
state.addNewModelUIOption = action.payload;
|
||||
},
|
||||
@@ -88,6 +95,7 @@ export const {
|
||||
setShouldUseCanvasBetaLayout,
|
||||
setShouldShowExistingModelsInSearch,
|
||||
setShouldUseSliders,
|
||||
setShouldShowHuggingFaceConcepts,
|
||||
setAddNewModelUIOption,
|
||||
} = uiSlice.actions;
|
||||
|
||||
|
||||
@@ -12,5 +12,6 @@ export interface UIState {
|
||||
shouldUseCanvasBetaLayout: boolean;
|
||||
shouldShowExistingModelsInSearch: boolean;
|
||||
shouldUseSliders: boolean;
|
||||
shouldShowHuggingFaceConcepts: boolean;
|
||||
addNewModelUIOption: AddNewModelType;
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@
|
||||
--accent-color-bright: rgb(104, 60, 230);
|
||||
--accent-color-hover: var(--accent-color-bright);
|
||||
|
||||
// App Colors
|
||||
--root-bg-color: rgb(10, 10, 10);
|
||||
--background-color: rgb(26, 26, 32);
|
||||
--background-color-light: rgb(40, 44, 48);
|
||||
@@ -119,6 +118,7 @@
|
||||
--context-menu-bg-color: rgb(46, 48, 58);
|
||||
--context-menu-box-shadow: none;
|
||||
--context-menu-bg-color-hover: rgb(30, 32, 42);
|
||||
--context-menu-active-item: var(--accent-color-bright);
|
||||
|
||||
// Shadows
|
||||
--floating-button-drop-shadow-color: var(--accent-color);
|
||||
|
||||
@@ -117,6 +117,7 @@
|
||||
--context-menu-bg-color: rgb(46, 48, 58);
|
||||
--context-menu-box-shadow: none;
|
||||
--context-menu-bg-color-hover: rgb(30, 32, 42);
|
||||
--context-menu-active-item: var(--accent-color-bright);
|
||||
|
||||
// Shadows
|
||||
--floating-button-drop-shadow-color: var(--accent-color);
|
||||
|
||||
@@ -114,6 +114,7 @@
|
||||
--context-menu-box-shadow: 0px 10px 38px -10px rgba(22, 23, 24, 0.35),
|
||||
0px 10px 20px -15px rgba(22, 23, 24, 0.2);
|
||||
--context-menu-bg-color-hover: var(--background-color-secondary);
|
||||
--context-menu-active-item: rgb(0, 0, 0);
|
||||
|
||||
// Shadows
|
||||
--floating-button-drop-shadow-color: rgba(0, 0, 0, 0.7);
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -56,26 +56,26 @@
|
||||
"@babel/helper-validator-identifier" "^7.19.1"
|
||||
to-fast-properties "^2.0.0"
|
||||
|
||||
"@chakra-ui/accordion@2.1.9":
|
||||
version "2.1.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.1.9.tgz#20fa86d94dc034251df2f7c8595ae4dd541a29d9"
|
||||
integrity sha512-a9CKIAUHezc0f5FR/SQ4GVxnWuIb2HbDTxTEKTp58w/J9pecIbJaNrJ5TUZ0MVbDU9jkgO9RsZ29jkja8PomAw==
|
||||
"@chakra-ui/accordion@2.1.11":
|
||||
version "2.1.11"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.1.11.tgz#c6df0100c543645d0631df3aefde2ea2b8ed6313"
|
||||
integrity sha512-mfVPmqETp9pyRDHJ33AdF19oHv/LyxVzQJtlxUByuvs8Cj9QQZ2LQLg5kejm+b3mj03A7A6yfbuo3RNaI4Bhsg==
|
||||
dependencies:
|
||||
"@chakra-ui/descendant" "3.0.13"
|
||||
"@chakra-ui/descendant" "3.0.14"
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/transition" "2.0.15"
|
||||
"@chakra-ui/transition" "2.0.16"
|
||||
|
||||
"@chakra-ui/alert@2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.0.17.tgz#b129732ec308db6a6a1afa7c06a6595ad853c967"
|
||||
integrity sha512-0Y5vw+HkeXpwbL1roVpSSNM6luMRmUbwduUSHEA4OnX1ismvsDb1ZBfpi4Vxp6w8euJ2Uj6df3krbd5tbCP6tg==
|
||||
"@chakra-ui/alert@2.1.0":
|
||||
version "2.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.1.0.tgz#7a234ac6426231b39243088648455cbcf1cbdf24"
|
||||
integrity sha512-OcfHwoXI5VrmM+tHJTHT62Bx6TfyfCxSa0PWUOueJzSyhlUOKBND5we6UtrOB7D0jwX45qKKEDJOLG5yCG21jQ==
|
||||
dependencies:
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/spinner" "2.0.13"
|
||||
|
||||
@@ -84,23 +84,23 @@
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/anatomy/-/anatomy-2.1.2.tgz#ea66b1841e7195da08ddc862daaa3f3e56e565f5"
|
||||
integrity sha512-pKfOS/mztc4sUXHNc8ypJ1gPWSolWT770jrgVRfolVbYlki8y5Y+As996zMF6k5lewTu6j9DQequ7Cc9a69IVQ==
|
||||
|
||||
"@chakra-ui/avatar@2.2.5":
|
||||
version "2.2.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/avatar/-/avatar-2.2.5.tgz#50eb7cc5a172d394b301fa0abd5f607b7f5d3563"
|
||||
integrity sha512-TEHXuGE79+fEn61qJ7J/A0Ec+WjyNwobrDTATcLg9Zx2/WEMmZNfrWIAlI5ANQAwVbdSWeGVbyoLAK5mbcrE0A==
|
||||
"@chakra-ui/avatar@2.2.8":
|
||||
version "2.2.8"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/avatar/-/avatar-2.2.8.tgz#a6e16accb2bb9c879f197090ccc9df1ff42992a6"
|
||||
integrity sha512-uBs9PMrqyK111tPIYIKnOM4n3mwgKqGpvYmtwBnnbQLTNLg4gtiWWVbpTuNMpyu1av0xQYomjUt8Doed8w6p8g==
|
||||
dependencies:
|
||||
"@chakra-ui/image" "2.0.15"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/breadcrumb@2.1.4":
|
||||
version "2.1.4"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/breadcrumb/-/breadcrumb-2.1.4.tgz#0d249dc2a92639bd2bf46d097dd5445112bd2367"
|
||||
integrity sha512-vyBx5TAxPnHhb0b8nyRGfqyjleD//9mySFhk96c9GL+T6YDO4swHw5y/kvDv3Ngc/iRwJ9hdI49PZKwPxLqsEg==
|
||||
"@chakra-ui/breadcrumb@2.1.5":
|
||||
version "2.1.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/breadcrumb/-/breadcrumb-2.1.5.tgz#a43b22cc8005291a615696a8c88efc37064562f3"
|
||||
integrity sha512-p3eQQrHQBkRB69xOmNyBJqEdfCrMt+e0eOH+Pm/DjFWfIVIbnIaFbmDCeWClqlLa21Ypc6h1hR9jEmvg8kmOog==
|
||||
dependencies:
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/breakpoint-utils@2.0.8":
|
||||
@@ -110,12 +110,12 @@
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/button@2.0.16":
|
||||
version "2.0.16"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/button/-/button-2.0.16.tgz#ff315b57ee47c3511a6507fcfb6f00bb93e2ac7d"
|
||||
integrity sha512-NjuTKa7gNhnGSUutKuTc8HoAOe9WWIigpciBG7yj3ok67kg8bXtSzPyQFZlgTY6XGdAckWTT+Do4tvhwa5LA+g==
|
||||
"@chakra-ui/button@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/button/-/button-2.0.18.tgz#c13d2e404e22a9873ba5373fde494bedafe32fdd"
|
||||
integrity sha512-E3c99+lOm6ou4nQVOTLkG+IdOPMjsQK+Qe7VyP8A/xeAMFONuibrWPRPpprr4ZkB4kEoLMfNuyH2+aEza3ScUA==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/spinner" "2.0.13"
|
||||
@@ -127,13 +127,13 @@
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/checkbox@2.2.10":
|
||||
version "2.2.10"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.2.10.tgz#e4f773e7d2464f1d6e9d18dd88b679290cb33171"
|
||||
integrity sha512-vzxEjw99qj7loxAdP1WuHNt4EAvj/t6cc8oxyOB2mEvkAzhxI34rLR+3zWDuHWsmhyUO+XEDh4FiWdR+DK5Siw==
|
||||
"@chakra-ui/checkbox@2.2.14":
|
||||
version "2.2.14"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.2.14.tgz#902acc99a9a80c1c304788a230cf36f8116e8260"
|
||||
integrity sha512-uqo6lFWLqYBujPglrvRhTAErtuIXpmdpc5w0W4bjK7kyvLhxOpUh1hlDb2WoqlNpfRn/OaNeF6VinPnf9BJL8w==
|
||||
dependencies:
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-callback-ref" "2.0.7"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
@@ -142,7 +142,7 @@
|
||||
"@chakra-ui/react-use-update-effect" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/visually-hidden" "2.0.15"
|
||||
"@zag-js/focus-visible" "0.2.1"
|
||||
"@zag-js/focus-visible" "0.2.2"
|
||||
|
||||
"@chakra-ui/clickable@2.0.14":
|
||||
version "2.0.14"
|
||||
@@ -180,17 +180,17 @@
|
||||
"@chakra-ui/react-use-callback-ref" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/css-reset@2.0.12":
|
||||
version "2.0.12"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.0.12.tgz#6eebcbe9e971facd215e174e063ace29f647a045"
|
||||
integrity sha512-Q5OYIMvqTl2vZ947kIYxcS5DhQXeStB84BzzBd6C10wOx1gFUu9pL+jLpOnHR3hhpWRMdX5o7eT+gMJWIYUZ0Q==
|
||||
"@chakra-ui/css-reset@2.1.1":
|
||||
version "2.1.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.1.1.tgz#c61f3d2103c13e62a86fd2d359682092e961852c"
|
||||
integrity sha512-jwEOfIAWmQsnChHQTW/eRE+dfE4MjmhvSvoUug5nkV1pI7veC/20noFlIZxzi82EbiQI8Fs0+Jnusgxr2yaOHA==
|
||||
|
||||
"@chakra-ui/descendant@3.0.13":
|
||||
version "3.0.13"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/descendant/-/descendant-3.0.13.tgz#e883a2233ee07fe1ae6c014567824c0f79df11cf"
|
||||
integrity sha512-9nzxZVxUSMc4xPL5fSaRkEOQjDQWUGjGvrZI7VzWk9eq63cojOtIxtWMSW383G9148PzWJjJYt30Eud5tdZzlg==
|
||||
"@chakra-ui/descendant@3.0.14":
|
||||
version "3.0.14"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/descendant/-/descendant-3.0.14.tgz#fe8bac3f0e1ffe562e3e73eac393dbf222d57e13"
|
||||
integrity sha512-+Ahvp9H4HMpfScIv9w1vaecGz7qWAaK1YFHHolz/SIsGLaLGlbdp+5UNabQC7L6TUnzzJDQDxzwif78rTD7ang==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
|
||||
"@chakra-ui/dom-utils@2.0.6":
|
||||
@@ -198,12 +198,12 @@
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/dom-utils/-/dom-utils-2.0.6.tgz#68f49f3b4a0bdebd5e416d6fd2c012c9ad64b76a"
|
||||
integrity sha512-PVtDkPrDD5b8aoL6Atg7SLjkwhWb7BwMcLOF1L449L3nZN+DAO3nyAh6iUhZVJyunELj9d0r65CDlnMREyJZmA==
|
||||
|
||||
"@chakra-ui/editable@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/editable/-/editable-2.0.19.tgz#1af2fe3c215111f61f7872fb5f599f4d8da24e7d"
|
||||
integrity sha512-YxRJsJ2JQd42zfPBgTKzIhg1HugT+gfQz1ZosmUN+IZT9YZXL2yodHTUz6Lee04Vc/CdEqgBFLuREXEUNBfGtA==
|
||||
"@chakra-ui/editable@2.0.21":
|
||||
version "2.0.21"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/editable/-/editable-2.0.21.tgz#bc74510470d6d455844438e540851896d3879132"
|
||||
integrity sha512-oYuXbHnggxSYJN7P9Pn0Scs9tPC91no4z1y58Oe+ILoJKZ+bFAEHtL7FEISDNJxw++MEukeFu7GU1hVqmdLsKQ==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-callback-ref" "2.0.7"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
@@ -226,13 +226,13 @@
|
||||
"@chakra-ui/dom-utils" "2.0.6"
|
||||
react-focus-lock "^2.9.2"
|
||||
|
||||
"@chakra-ui/form-control@2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.0.17.tgz#2f710325e77ce35067337616d440f903b137bdd5"
|
||||
integrity sha512-34ptCaJ2LNvQNOlB6MAKsmH1AkT1xo7E+3Vw10Urr81yTOjDTM/iU6vG3JKPfRDMyXeowPjXmutlnuk72SSjRg==
|
||||
"@chakra-ui/form-control@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.0.18.tgz#1923f293afde70b2b07ca731d98fef3660098c56"
|
||||
integrity sha512-I0a0jG01IAtRPccOXSNugyRdUAe8Dy40ctqedZvznMweOXzbMCF1m+sHPLdWeWC/VI13VoAispdPY0/zHOdjsQ==
|
||||
dependencies:
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
@@ -254,10 +254,10 @@
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/icons@^2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.0.17.tgz#625a46d169707aad36d65c04a4626a422f92e5ae"
|
||||
integrity sha512-HMJP0WrJgAmFR9+Xh/CBH0nVnGMsJ4ZC8MK6tMgxPKd9/muvn0I4hsicHqdPlLpmB0TlxlhkBAKaVMtOdz6F0w==
|
||||
"@chakra-ui/icons@^2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.0.18.tgz#6f859d2e0d8f31fea9cb2e6507d65eb65cb95cf5"
|
||||
integrity sha512-E/+DF/jw7kdN4/XxCZRnr4FdMXhkl50Q34MVwN9rADWMwPK9uSZPGyC7HOx6rilo7q4bFjYDH3yRj9g+VfbVkg==
|
||||
dependencies:
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
|
||||
@@ -269,27 +269,27 @@
|
||||
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/input@2.0.20":
|
||||
version "2.0.20"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.0.20.tgz#8db3ec46b52be901c94599b3659a9003bdb2dd07"
|
||||
integrity sha512-ypmsy4n4uNBVgn6Gd24Zrpi+qRf/T9WEzWkysuYC9Qfxo+i7yuf3snp7XmBy8KSGVSiXE11eO8ZN5oCg6Xg0jg==
|
||||
"@chakra-ui/input@2.0.21":
|
||||
version "2.0.21"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.0.21.tgz#a7e55ea6fa32ae39c0f6ec44ca2189933fda9eb5"
|
||||
integrity sha512-AIWjjg6MgcOtlvKmVoZfPPfgF+sBSWL3Zq2HSCAMvS6h7jfxz/Xv0UTFGPk5F4Wt0YHT7qMySg0Jsm0b78HZJg==
|
||||
dependencies:
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/object-utils" "2.0.8"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/layout@2.1.16":
|
||||
version "2.1.16"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.1.16.tgz#9d90f25cf9f0537d19cd36a417f7ddc1461e8591"
|
||||
integrity sha512-QFS3feozIGsvB0H74lUocev55aRF26eNrdmhfJifwikZAiq+zzZAMdBdNU9UJhHClnMOU8/iGZ0MF7ti4zQS1A==
|
||||
"@chakra-ui/layout@2.1.18":
|
||||
version "2.1.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.1.18.tgz#f5dba687dfced9145d495f3a21edb5672df6bb73"
|
||||
integrity sha512-F4Gh2e+DGdaWdWT5NZduIFD9NM7Bnuh8sXARFHWPvIu7yvAwZ3ddqC9GK4F3qUngdmkJxDLWQqRSwSh96Lxbhw==
|
||||
dependencies:
|
||||
"@chakra-ui/breakpoint-utils" "2.0.8"
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/object-utils" "2.0.8"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/lazy-utils@2.0.5":
|
||||
@@ -311,17 +311,17 @@
|
||||
"@chakra-ui/react-env" "3.0.0"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/menu@2.1.9":
|
||||
version "2.1.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.1.9.tgz#2f3239a9b2855fd77fc317d9e6b904c1ad50d7c6"
|
||||
integrity sha512-ue5nD4QJcl3H3UwN0zZNJmH89XUebnvEdW6THAUL41hDjJ0J/Fjpg9Sgzwug2aBbBXBNbVMsUuhcCj6x91d+IQ==
|
||||
"@chakra-ui/menu@2.1.12":
|
||||
version "2.1.12"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.1.12.tgz#ab83b7a5165bd31a6c68328d7f65a79e3412c48d"
|
||||
integrity sha512-ylNK1VJlr/3/EGg9dLPZ87cBJJjeiYXeU/gOAphsKXMnByrXWhbp4YVnyyyha2KZ0zEw0aPU4nCZ+A69aT9wrg==
|
||||
dependencies:
|
||||
"@chakra-ui/clickable" "2.0.14"
|
||||
"@chakra-ui/descendant" "3.0.13"
|
||||
"@chakra-ui/descendant" "3.0.14"
|
||||
"@chakra-ui/lazy-utils" "2.0.5"
|
||||
"@chakra-ui/popper" "3.0.13"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-animation-state" "2.0.8"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
"@chakra-ui/react-use-disclosure" "2.0.8"
|
||||
@@ -330,33 +330,33 @@
|
||||
"@chakra-ui/react-use-outside-click" "2.0.7"
|
||||
"@chakra-ui/react-use-update-effect" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/transition" "2.0.15"
|
||||
"@chakra-ui/transition" "2.0.16"
|
||||
|
||||
"@chakra-ui/modal@2.2.9":
|
||||
version "2.2.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.2.9.tgz#aad65a2c60aa974e023f8b3facc0e79eb742e006"
|
||||
integrity sha512-nTfNp7XsVwn5+xJOtstoFA8j0kq/9sJj7KesyYzjEDaMKvCZvIOntRYowoydho43jb4+YC7ebKhp0KOIINS0gg==
|
||||
"@chakra-ui/modal@2.2.11":
|
||||
version "2.2.11"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.2.11.tgz#8a964288759f3d681e23bfc3a837a3e2c7523f8e"
|
||||
integrity sha512-2J0ZUV5tEzkPiawdkgPz6bmex7NXAde1VXooMwdvK+vuT8PV3U61yorTJOZVLdw7TjjI1Yo94mzsp6UwBud43Q==
|
||||
dependencies:
|
||||
"@chakra-ui/close-button" "2.0.17"
|
||||
"@chakra-ui/focus-lock" "2.0.16"
|
||||
"@chakra-ui/portal" "2.0.15"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/portal" "2.0.16"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/transition" "2.0.15"
|
||||
"@chakra-ui/transition" "2.0.16"
|
||||
aria-hidden "^1.2.2"
|
||||
react-remove-scroll "^2.5.5"
|
||||
|
||||
"@chakra-ui/number-input@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.0.18.tgz#072a00ef869ebafa4960cfdee8caae8208864289"
|
||||
integrity sha512-cPkyAFFHHzeFBselrT1BtjlzMkJ6TKrTDUnHFlzqXy6aqeXuhrjFhMfXucjedSpOqedsP9ZbKFTdIAhu9DdL/A==
|
||||
"@chakra-ui/number-input@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.0.19.tgz#82d4522036904c04d07e7050822fc522f9b32233"
|
||||
integrity sha512-HDaITvtMEqOauOrCPsARDxKD9PSHmhWywpcyCSOX0lMe4xx2aaGhU0QQFhsJsykj8Er6pytMv6t0KZksdDv3YA==
|
||||
dependencies:
|
||||
"@chakra-ui/counter" "2.0.14"
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-callback-ref" "2.0.7"
|
||||
"@chakra-ui/react-use-event-listener" "2.0.7"
|
||||
@@ -376,27 +376,27 @@
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/object-utils/-/object-utils-2.0.8.tgz#307f927f6434f99feb32ba92bdf451a6b59a6199"
|
||||
integrity sha512-2upjT2JgRuiupdrtBWklKBS6tqeGMA77Nh6Q0JaoQuH/8yq+15CGckqn3IUWkWoGI0Fg3bK9LDlbbD+9DLw95Q==
|
||||
|
||||
"@chakra-ui/pin-input@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/pin-input/-/pin-input-2.0.19.tgz#f9b196174f0518feec5c1ee3fcaf2134c301148a"
|
||||
integrity sha512-6O7s4vWz4cqQ6zvMov9sYj6ZqWAsTxR/MNGe3DNgu1zWQg8veNCYtj1rNGhNS3eZNUMAa8uM2dXIphGTP53Xow==
|
||||
"@chakra-ui/pin-input@2.0.20":
|
||||
version "2.0.20"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/pin-input/-/pin-input-2.0.20.tgz#5bf115bf4282b69fc6532a9c542cbf41f815d200"
|
||||
integrity sha512-IHVmerrtHN8F+jRB3W1HnMir1S1TUCWhI7qDInxqPtoRffHt6mzZgLZ0izx8p1fD4HkW4c1d4/ZLEz9uH9bBRg==
|
||||
dependencies:
|
||||
"@chakra-ui/descendant" "3.0.13"
|
||||
"@chakra-ui/descendant" "3.0.14"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/popover@2.1.8":
|
||||
version "2.1.8"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.1.8.tgz#e906ce0533693d735b6e13a3a6ffe16d8e0a9ab4"
|
||||
integrity sha512-ob7fAz+WWmXIq7iGHVB3wDKzZTj+T+noYBT/U1Q+jIf+jMr2WOpJLTfb0HTZcfhvn4EBFlfBg7Wk5qbXNaOn7g==
|
||||
"@chakra-ui/popover@2.1.9":
|
||||
version "2.1.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.1.9.tgz#890cc0dfc5022757715ccf772ec194e7a409275f"
|
||||
integrity sha512-OMJ12VVs9N32tFaZSOqikkKPtwAVwXYsES/D1pff/amBrE3ngCrpxJSIp4uvTdORfIYDojJqrR52ZplDKS9hRQ==
|
||||
dependencies:
|
||||
"@chakra-ui/close-button" "2.0.17"
|
||||
"@chakra-ui/lazy-utils" "2.0.5"
|
||||
"@chakra-ui/popper" "3.0.13"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-animation-state" "2.0.8"
|
||||
"@chakra-ui/react-use-disclosure" "2.0.8"
|
||||
@@ -414,53 +414,53 @@
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@popperjs/core" "^2.9.3"
|
||||
|
||||
"@chakra-ui/portal@2.0.15":
|
||||
version "2.0.15"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/portal/-/portal-2.0.15.tgz#21e1f97c4407fc15df8c365cb5cf799dac73ce41"
|
||||
integrity sha512-z8v7K3j1/nMuBzp2+wRIIw7s/eipVtnXLdjK5yqbMxMRa44E8Mu5VNJLz3aQFLHXEUST+ifqrjImQeli9do6LQ==
|
||||
"@chakra-ui/portal@2.0.16":
|
||||
version "2.0.16"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/portal/-/portal-2.0.16.tgz#e5ce3f9d9e559f17a95276e0c006d0e9b7703442"
|
||||
integrity sha512-bVID0qbQ0l4xq38LdqAN4EKD4/uFkDnXzFwOlviC9sl0dNhzICDb1ltuH/Adl1d2HTMqyN60O3GO58eHy7plnQ==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
|
||||
|
||||
"@chakra-ui/progress@2.1.5":
|
||||
version "2.1.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/progress/-/progress-2.1.5.tgz#eb6a47adf2bff93971262d163461d390782a04ff"
|
||||
integrity sha512-jj5Vp4lxUchuwp4RPCepM0yAyKi344bgsOd3Apd+ldxclDcewPc82fbwDu7g/Xv27LqJkT+7E/SlQy04wGrk0g==
|
||||
"@chakra-ui/progress@2.1.6":
|
||||
version "2.1.6"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/progress/-/progress-2.1.6.tgz#398db20440979c37adb0a34821f805ae3471873b"
|
||||
integrity sha512-hHh5Ysv4z6bK+j2GJbi/FT9CVyto2PtNUNwBmr3oNMVsoOUMoRjczfXvvYqp0EHr9PCpxqrq7sRwgQXUzhbDSw==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
|
||||
"@chakra-ui/provider@2.1.2":
|
||||
version "2.1.2"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.1.2.tgz#b025cb718826b003b3c9535b6961e8f3be70ebd5"
|
||||
integrity sha512-4lLlz8QuJv00BhfyKzWpzfoti9MDOdJ/MqXixJV/EZ02RMBOdE9qy9bSz/WckPC2MVhtRUuwMkxH+0QY21PXuw==
|
||||
"@chakra-ui/provider@2.2.2":
|
||||
version "2.2.2"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.2.2.tgz#a798d1c243f33e00c85763834a7350e0d1c643ad"
|
||||
integrity sha512-UVwnIDnAWq1aKroN5AF+OpNpUqLVeIUk7tKvX3z4CY9FsPFFi6LTEhRHdhpwaU1Tau3Tf9agEu5URegpY7S8BA==
|
||||
dependencies:
|
||||
"@chakra-ui/css-reset" "2.0.12"
|
||||
"@chakra-ui/portal" "2.0.15"
|
||||
"@chakra-ui/css-reset" "2.1.1"
|
||||
"@chakra-ui/portal" "2.0.16"
|
||||
"@chakra-ui/react-env" "3.0.0"
|
||||
"@chakra-ui/system" "2.5.1"
|
||||
"@chakra-ui/system" "2.5.5"
|
||||
"@chakra-ui/utils" "2.0.15"
|
||||
|
||||
"@chakra-ui/radio@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.0.19.tgz#8d5c02eae8eddbced4476b1b50921ade62f0a744"
|
||||
integrity sha512-PlJiV59eGSmeKP4v/4+ccQUWGRd0cjPKkj/p3L+UbOf8pl9dWm8y9kIeL5TYbghQSDv0nzkrH4+yMnnDTZjdMQ==
|
||||
"@chakra-ui/radio@2.0.22":
|
||||
version "2.0.22"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.0.22.tgz#fad0ce7c9ba4051991ed517cac4cfe526d6d47d9"
|
||||
integrity sha512-GsQ5WAnLwivWl6gPk8P1x+tCcpVakCt5R5T0HumF7DGPXKdJbjS+RaFySrbETmyTJsKY4QrfXn+g8CWVrMjPjw==
|
||||
dependencies:
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@zag-js/focus-visible" "0.2.1"
|
||||
"@zag-js/focus-visible" "0.2.2"
|
||||
|
||||
"@chakra-ui/react-children-utils@2.0.6":
|
||||
version "2.0.6"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-children-utils/-/react-children-utils-2.0.6.tgz#6c480c6a60678fcb75cb7d57107c7a79e5179b92"
|
||||
integrity sha512-QVR2RC7QsOsbWwEnq9YduhpqSFnZGvjjGREV8ygKi8ADhXh93C8azLECCUVgRJF2Wc+So1fgxmjLcbZfY2VmBA==
|
||||
|
||||
"@chakra-ui/react-context@2.0.7":
|
||||
version "2.0.7"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-context/-/react-context-2.0.7.tgz#f79a2b072d04d4280ec8799dc03a8a1af521ca2e"
|
||||
integrity sha512-i7EGmSU+h2GB30cwrKB4t1R5BMHyGoJM5L2Zz7b+ZUX4aAqyPcfe97wPiQB6Rgr1ImGXrUeov4CDVrRZ2FPgLQ==
|
||||
"@chakra-ui/react-context@2.0.8":
|
||||
version "2.0.8"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-context/-/react-context-2.0.8.tgz#5e0ed33ac3995875a21dea0e12b0ee5fc4c2e3cc"
|
||||
integrity sha512-tRTKdn6lCTXM6WPjSokAAKCw2ioih7Eg8cNgaYRSwKBck8nkz9YqxgIIEj3dJD7MGtpl24S/SNI98iRWkRwR/A==
|
||||
|
||||
"@chakra-ui/react-env@3.0.0":
|
||||
version "3.0.0"
|
||||
@@ -568,12 +568,12 @@
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-safe-layout-effect/-/react-use-safe-layout-effect-2.0.5.tgz#6cf388c37fd2a42b5295a292e149b32f860a00a7"
|
||||
integrity sha512-MwAQBz3VxoeFLaesaSEN87reVNVbjcQBDex2WGexAg6hUB6n4gc1OWYH/iXp4tzp4kuggBNhEHkk9BMYXWfhJQ==
|
||||
|
||||
"@chakra-ui/react-use-size@2.0.9":
|
||||
version "2.0.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-size/-/react-use-size-2.0.9.tgz#00717867b98a24c3bdcfaa0c3e70732404193486"
|
||||
integrity sha512-Jce7QmO1jlQZq+Y77VKckWzroRnajChzUQ8xhLQZO6VbYvrpg3cu+X2QCz3G+MZzB+1/hnvvAqmZ+uJLd8rEJg==
|
||||
"@chakra-ui/react-use-size@2.0.10":
|
||||
version "2.0.10"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-size/-/react-use-size-2.0.10.tgz#6131950852490c06e5fb3760bf64097c8057391f"
|
||||
integrity sha512-fdIkH14GDnKQrtQfxX8N3gxbXRPXEl67Y3zeD9z4bKKcQUAYIMqs0MsPZY+FMpGQw8QqafM44nXfL038aIrC5w==
|
||||
dependencies:
|
||||
"@zag-js/element-size" "0.3.1"
|
||||
"@zag-js/element-size" "0.3.2"
|
||||
|
||||
"@chakra-ui/react-use-timeout@2.0.5":
|
||||
version "2.0.5"
|
||||
@@ -594,69 +594,69 @@
|
||||
dependencies:
|
||||
"@chakra-ui/utils" "2.0.15"
|
||||
|
||||
"@chakra-ui/react@^2.5.1":
|
||||
version "2.5.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.5.1.tgz#05414db2b512bd4402e42eecc6b915d85102c576"
|
||||
integrity sha512-ugkaqfcNMb9L4TkalWiF3rnqfr0TlUUD46JZaDIZiORVisaSwXTZTQrVfG40VghhaJT28rnC5WtiE8kd567ZBQ==
|
||||
"@chakra-ui/react@^2.5.5":
|
||||
version "2.5.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.5.5.tgz#5ae2450ec0d10d63e1314747466f21cf542032ff"
|
||||
integrity sha512-aBVMUtdWv2MrptD/tKSqICPsuJ+I+jvauegffO1qPUDlK3RrXIDeOHkLGWohgXNcjY5bGVWguFEzJm97//0ooQ==
|
||||
dependencies:
|
||||
"@chakra-ui/accordion" "2.1.9"
|
||||
"@chakra-ui/alert" "2.0.17"
|
||||
"@chakra-ui/avatar" "2.2.5"
|
||||
"@chakra-ui/breadcrumb" "2.1.4"
|
||||
"@chakra-ui/button" "2.0.16"
|
||||
"@chakra-ui/accordion" "2.1.11"
|
||||
"@chakra-ui/alert" "2.1.0"
|
||||
"@chakra-ui/avatar" "2.2.8"
|
||||
"@chakra-ui/breadcrumb" "2.1.5"
|
||||
"@chakra-ui/button" "2.0.18"
|
||||
"@chakra-ui/card" "2.1.6"
|
||||
"@chakra-ui/checkbox" "2.2.10"
|
||||
"@chakra-ui/checkbox" "2.2.14"
|
||||
"@chakra-ui/close-button" "2.0.17"
|
||||
"@chakra-ui/control-box" "2.0.13"
|
||||
"@chakra-ui/counter" "2.0.14"
|
||||
"@chakra-ui/css-reset" "2.0.12"
|
||||
"@chakra-ui/editable" "2.0.19"
|
||||
"@chakra-ui/css-reset" "2.1.1"
|
||||
"@chakra-ui/editable" "2.0.21"
|
||||
"@chakra-ui/focus-lock" "2.0.16"
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/hooks" "2.1.6"
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/image" "2.0.15"
|
||||
"@chakra-ui/input" "2.0.20"
|
||||
"@chakra-ui/layout" "2.1.16"
|
||||
"@chakra-ui/input" "2.0.21"
|
||||
"@chakra-ui/layout" "2.1.18"
|
||||
"@chakra-ui/live-region" "2.0.13"
|
||||
"@chakra-ui/media-query" "3.2.12"
|
||||
"@chakra-ui/menu" "2.1.9"
|
||||
"@chakra-ui/modal" "2.2.9"
|
||||
"@chakra-ui/number-input" "2.0.18"
|
||||
"@chakra-ui/pin-input" "2.0.19"
|
||||
"@chakra-ui/popover" "2.1.8"
|
||||
"@chakra-ui/menu" "2.1.12"
|
||||
"@chakra-ui/modal" "2.2.11"
|
||||
"@chakra-ui/number-input" "2.0.19"
|
||||
"@chakra-ui/pin-input" "2.0.20"
|
||||
"@chakra-ui/popover" "2.1.9"
|
||||
"@chakra-ui/popper" "3.0.13"
|
||||
"@chakra-ui/portal" "2.0.15"
|
||||
"@chakra-ui/progress" "2.1.5"
|
||||
"@chakra-ui/provider" "2.1.2"
|
||||
"@chakra-ui/radio" "2.0.19"
|
||||
"@chakra-ui/portal" "2.0.16"
|
||||
"@chakra-ui/progress" "2.1.6"
|
||||
"@chakra-ui/provider" "2.2.2"
|
||||
"@chakra-ui/radio" "2.0.22"
|
||||
"@chakra-ui/react-env" "3.0.0"
|
||||
"@chakra-ui/select" "2.0.18"
|
||||
"@chakra-ui/select" "2.0.19"
|
||||
"@chakra-ui/skeleton" "2.0.24"
|
||||
"@chakra-ui/slider" "2.0.21"
|
||||
"@chakra-ui/slider" "2.0.23"
|
||||
"@chakra-ui/spinner" "2.0.13"
|
||||
"@chakra-ui/stat" "2.0.17"
|
||||
"@chakra-ui/styled-system" "2.6.1"
|
||||
"@chakra-ui/switch" "2.0.22"
|
||||
"@chakra-ui/system" "2.5.1"
|
||||
"@chakra-ui/table" "2.0.16"
|
||||
"@chakra-ui/tabs" "2.1.8"
|
||||
"@chakra-ui/tag" "2.0.17"
|
||||
"@chakra-ui/textarea" "2.0.18"
|
||||
"@chakra-ui/theme" "2.2.5"
|
||||
"@chakra-ui/theme-utils" "2.0.11"
|
||||
"@chakra-ui/toast" "6.0.1"
|
||||
"@chakra-ui/tooltip" "2.2.6"
|
||||
"@chakra-ui/transition" "2.0.15"
|
||||
"@chakra-ui/stat" "2.0.18"
|
||||
"@chakra-ui/styled-system" "2.8.0"
|
||||
"@chakra-ui/switch" "2.0.26"
|
||||
"@chakra-ui/system" "2.5.5"
|
||||
"@chakra-ui/table" "2.0.17"
|
||||
"@chakra-ui/tabs" "2.1.9"
|
||||
"@chakra-ui/tag" "3.0.0"
|
||||
"@chakra-ui/textarea" "2.0.19"
|
||||
"@chakra-ui/theme" "3.0.1"
|
||||
"@chakra-ui/theme-utils" "2.0.15"
|
||||
"@chakra-ui/toast" "6.1.1"
|
||||
"@chakra-ui/tooltip" "2.2.7"
|
||||
"@chakra-ui/transition" "2.0.16"
|
||||
"@chakra-ui/utils" "2.0.15"
|
||||
"@chakra-ui/visually-hidden" "2.0.15"
|
||||
|
||||
"@chakra-ui/select@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.0.18.tgz#4eb6092610067c1b4131353fe39b4466e251395b"
|
||||
integrity sha512-1d2lUT5LM6oOs5x4lzBh4GFDuXX62+lr+sgV7099g951/5UNbb0CS2hSZHsO7yZThLNbr7QTWZvAOAayVcGzdw==
|
||||
"@chakra-ui/select@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.0.19.tgz#957e95a17a890d8c0a851e2f00a8d8dd17932d66"
|
||||
integrity sha512-eAlFh+JhwtJ17OrB6fO6gEAGOMH18ERNrXLqWbYLrs674Le7xuREgtuAYDoxUzvYXYYTTdOJtVbcHGriI3o6rA==
|
||||
dependencies:
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/shared-utils@2.0.5":
|
||||
@@ -673,20 +673,20 @@
|
||||
"@chakra-ui/react-use-previous" "2.0.5"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/slider@2.0.21":
|
||||
version "2.0.21"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/slider/-/slider-2.0.21.tgz#f65b15bf0d5f827699ff9a2d6faee35006e2bfce"
|
||||
integrity sha512-Mm76yJxEqJl21+3waEcKg3tM8Y4elJ7mcViN6Brj35PTfzUJfSJxeBGo1nLPJ+X5jLj7o/L4kfBmUk3lY4QYEQ==
|
||||
"@chakra-ui/slider@2.0.23":
|
||||
version "2.0.23"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/slider/-/slider-2.0.23.tgz#9130c7aee8ca876be64d1aeba6b84fe421c8207b"
|
||||
integrity sha512-/eyRUXLla+ZdBUPXpakE3SAS2JS8mIJR6qcUYiPVKSpRAi6tMyYeQijAXn2QC1AUVd2JrG8Pz+1Jy7Po3uA7cA==
|
||||
dependencies:
|
||||
"@chakra-ui/number-utils" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-callback-ref" "2.0.7"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
"@chakra-ui/react-use-latest-ref" "2.0.5"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/react-use-pan-event" "2.0.9"
|
||||
"@chakra-ui/react-use-size" "2.0.9"
|
||||
"@chakra-ui/react-use-size" "2.0.10"
|
||||
"@chakra-ui/react-use-update-effect" "2.0.7"
|
||||
|
||||
"@chakra-ui/spinner@2.0.13":
|
||||
@@ -696,82 +696,82 @@
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/stat@2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.0.17.tgz#2cd712cc7e0d58d9cbd542deea911f1b0925074f"
|
||||
integrity sha512-PhD+5oVLWjQmGLfeZSmexp3AtLcaggWBwoMZ4z8QMZIQzf/fJJWMk0bMqxlpTv8ORDkfY/4ImuFB/RJHvcqlcA==
|
||||
"@chakra-ui/stat@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.0.18.tgz#9e5d21d162b7cf2cf92065c19291ead2d4660772"
|
||||
integrity sha512-wKyfBqhVlIs9bkSerUc6F9KJMw0yTIEKArW7dejWwzToCLPr47u+CtYO6jlJHV6lRvkhi4K4Qc6pyvtJxZ3VpA==
|
||||
dependencies:
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/styled-system@2.6.1":
|
||||
version "2.6.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/styled-system/-/styled-system-2.6.1.tgz#302d496d34c0b7b30c646a7e3c9b113a2f4588da"
|
||||
integrity sha512-jy/1dVi1LxjoRCm+Eo5mqBgvPy5SCWMlIcz6GbIZBDpkGeKZwtqrZLjekxxLBCy8ORY+kJlUB0FT6AzVR/1tjw==
|
||||
"@chakra-ui/styled-system@2.8.0":
|
||||
version "2.8.0"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/styled-system/-/styled-system-2.8.0.tgz#c02aa7b4a15bd826c19d055cd226bd44f7470f26"
|
||||
integrity sha512-bmRv/8ACJGGKGx84U1npiUddwdNifJ+/ETklGwooS5APM0ymwUtBYZpFxjYNJrqvVYpg3mVY6HhMyBVptLS7iA==
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
csstype "^3.0.11"
|
||||
lodash.mergewith "4.6.2"
|
||||
|
||||
"@chakra-ui/switch@2.0.22":
|
||||
version "2.0.22"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.0.22.tgz#7b35e2b10ea4cf91fb49f5175b4335c61dcd25b3"
|
||||
integrity sha512-+/Yy6y7VFD91uSPruF8ZvePi3tl5D8UNVATtWEQ+QBI92DLSM+PtgJ2F0Y9GMZ9NzMxpZ80DqwY7/kqcPCfLvw==
|
||||
"@chakra-ui/switch@2.0.26":
|
||||
version "2.0.26"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.0.26.tgz#b93eeafd788e47c21222524adceffe9ef62602d6"
|
||||
integrity sha512-x62lF6VazSZJQuVxosChVR6+0lIJe8Pxgkl/C9vxjhp2yVYb3mew5tcX/sDOu0dYZy8ro/9hMfGkdN4r9xEU8A==
|
||||
dependencies:
|
||||
"@chakra-ui/checkbox" "2.2.10"
|
||||
"@chakra-ui/checkbox" "2.2.14"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/system@2.5.1":
|
||||
version "2.5.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.5.1.tgz#bc03a11ae31e795966c7618280548d5cd866f47e"
|
||||
integrity sha512-4+86OrcSoq7lGkm5fh+sJ3IWXSTzjz+HOllRbCW2Rtnmcg7ritiXVNV2VygEg2DrCcx5+tNqRHDM764zW+AEug==
|
||||
"@chakra-ui/system@2.5.5":
|
||||
version "2.5.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.5.5.tgz#b8b070d07ca9b0190363100396eea02cca754cec"
|
||||
integrity sha512-52BIp/Zyvefgxn5RTByfkTeG4J+y81LWEjWm8jCaRFsLVm8IFgqIrngtcq4I7gD5n/UKbneHlb4eLHo4uc5yDQ==
|
||||
dependencies:
|
||||
"@chakra-ui/color-mode" "2.1.12"
|
||||
"@chakra-ui/object-utils" "2.0.8"
|
||||
"@chakra-ui/react-utils" "2.0.12"
|
||||
"@chakra-ui/styled-system" "2.6.1"
|
||||
"@chakra-ui/theme-utils" "2.0.11"
|
||||
"@chakra-ui/styled-system" "2.8.0"
|
||||
"@chakra-ui/theme-utils" "2.0.15"
|
||||
"@chakra-ui/utils" "2.0.15"
|
||||
react-fast-compare "3.2.0"
|
||||
react-fast-compare "3.2.1"
|
||||
|
||||
"@chakra-ui/table@2.0.16":
|
||||
version "2.0.16"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/table/-/table-2.0.16.tgz#e69736cba5cfb218c5e40592ad9280c6e32f6fe7"
|
||||
integrity sha512-vWDXZ6Ad3Aj66curp1tZBHvCfQHX2FJ4ijLiqGgQszWFIchfhJ5vMgEBJaFMZ+BN1draAjuRTZqaQefOApzvRg==
|
||||
"@chakra-ui/table@2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/table/-/table-2.0.17.tgz#ad394dc6dcbe5a8a9e6d899997ecca3471603977"
|
||||
integrity sha512-OScheTEp1LOYvTki2NFwnAYvac8siAhW9BI5RKm5f5ORL2gVJo4I72RUqE0aKe1oboxgm7CYt5afT5PS5cG61A==
|
||||
dependencies:
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/tabs@2.1.8":
|
||||
version "2.1.8"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-2.1.8.tgz#e83071380f9a3633810308d45de51be7a74f5eb9"
|
||||
integrity sha512-B7LeFN04Ny2jsSy5TFOQxnbZ6ITxGxLxsB2PE0vvQjMSblBrUryOxdjw80HZhfiw6od0ikK9CeKQOIt9QCguSw==
|
||||
"@chakra-ui/tabs@2.1.9":
|
||||
version "2.1.9"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-2.1.9.tgz#2e5214cb453c6cc0c240e82bd88af1042fc6fe0e"
|
||||
integrity sha512-Yf8e0kRvaGM6jfkJum0aInQ0U3ZlCafmrYYni2lqjcTtThqu+Yosmo3iYlnullXxCw5MVznfrkb9ySvgQowuYg==
|
||||
dependencies:
|
||||
"@chakra-ui/clickable" "2.0.14"
|
||||
"@chakra-ui/descendant" "3.0.13"
|
||||
"@chakra-ui/descendant" "3.0.14"
|
||||
"@chakra-ui/lazy-utils" "2.0.5"
|
||||
"@chakra-ui/react-children-utils" "2.0.6"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-controllable-state" "2.0.8"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/tag@2.0.17":
|
||||
version "2.0.17"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-2.0.17.tgz#97adb86db190ddb3526060b78c590392e0ac8b4c"
|
||||
integrity sha512-A47zE9Ft9qxOJ+5r1cUseKRCoEdqCRzFm0pOtZgRcckqavglk75Xjgz8HbBpUO2zqqd49MlqdOwR8o87fXS1vg==
|
||||
"@chakra-ui/tag@3.0.0":
|
||||
version "3.0.0"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-3.0.0.tgz#d86cdab59bb3ff7fc628c2dbe7a5ff1b36bd3e96"
|
||||
integrity sha512-YWdMmw/1OWRwNkG9pX+wVtZio+B89odaPj6XeMn5nfNN8+jyhIEpouWv34+CO9G0m1lupJTxPSfgLAd7cqXZMA==
|
||||
dependencies:
|
||||
"@chakra-ui/icon" "3.0.16"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
|
||||
"@chakra-ui/textarea@2.0.18":
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.0.18.tgz#da6d629b465f65bbc7b48039c2e48a4ae1d853b4"
|
||||
integrity sha512-aGHHb29vVifO0OtcK/k8cMykzjOKo/coDTU0NJqz7OOLAWIMNV2eGenvmO1n9tTZbmbqHiX+Sa1nPRX+pd14lg==
|
||||
"@chakra-ui/textarea@2.0.19":
|
||||
version "2.0.19"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.0.19.tgz#470b459f9cb3255d2abbe07d46b0a5b60a6a32c5"
|
||||
integrity sha512-adJk+qVGsFeJDvfn56CcJKKse8k7oMGlODrmpnpTdF+xvlsiTM+1GfaJvgNSpHHuQFdz/A0z1uJtfGefk0G2ZA==
|
||||
dependencies:
|
||||
"@chakra-ui/form-control" "2.0.17"
|
||||
"@chakra-ui/form-control" "2.0.18"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/theme-tools@2.0.17":
|
||||
@@ -783,57 +783,57 @@
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
color2k "^2.0.0"
|
||||
|
||||
"@chakra-ui/theme-utils@2.0.11":
|
||||
version "2.0.11"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.11.tgz#c01b1d14fdd63326d1ad11fd8f0872921ea43872"
|
||||
integrity sha512-lBAay6Sq3/fl7exd3mFxWAbzgdQowytor0fnlHrpNStn1HgFjXukwsf6356XQOie2Vd8qaMM7qZtMh4AiC0dcg==
|
||||
"@chakra-ui/theme-utils@2.0.15":
|
||||
version "2.0.15"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.15.tgz#968a5e8c47bb403323fe67049c7b751a6e47f069"
|
||||
integrity sha512-UuxtEgE7gwMTGDXtUpTOI7F5X0iHB9ekEOG5PWPn2wWBL7rlk2JtPI7UP5Um5Yg6vvBfXYGK1ySahxqsgf+87g==
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/styled-system" "2.6.1"
|
||||
"@chakra-ui/theme" "2.2.5"
|
||||
"@chakra-ui/styled-system" "2.8.0"
|
||||
"@chakra-ui/theme" "3.0.1"
|
||||
lodash.mergewith "4.6.2"
|
||||
|
||||
"@chakra-ui/theme@2.2.5":
|
||||
version "2.2.5"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-2.2.5.tgz#18ed1755ff27c1ff1f1a77083ffc546c361c926e"
|
||||
integrity sha512-hYASZMwu0NqEv6PPydu+F3I+kMNd44yR4TwjR/lXBz/LEh64L6UPY6kQjebCfgdVtsGdl3HKg+eLlfa7SvfRgw==
|
||||
"@chakra-ui/theme@3.0.1":
|
||||
version "3.0.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-3.0.1.tgz#151fc5d1e23d0fd0cd29d28acf8f6017269c13fc"
|
||||
integrity sha512-92kDm/Ux/51uJqhRKevQo/O/rdwucDYcpHg2QuwzdAxISCeYvgtl2TtgOOl5EnqEP0j3IEAvZHZUlv8TTbawaw==
|
||||
dependencies:
|
||||
"@chakra-ui/anatomy" "2.1.2"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/theme-tools" "2.0.17"
|
||||
|
||||
"@chakra-ui/toast@6.0.1":
|
||||
version "6.0.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-6.0.1.tgz#726b67a57cdd592320bb3f450c66d007a2a1d902"
|
||||
integrity sha512-ej2kJXvu/d2h6qnXU5D8XTyw0qpsfmbiU7hUffo/sPxkz89AUOQ08RUuUmB1ssW/FZcQvNMJ5WgzCTKHGBxtxw==
|
||||
"@chakra-ui/toast@6.1.1":
|
||||
version "6.1.1"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-6.1.1.tgz#7ca78f38069bc87fa75b64de76c8fc2758bdf419"
|
||||
integrity sha512-JtjIKkPVjEu8okGGCipCxNVgK/15h5AicTATZ6RbG2MsHmr4GfKG3fUCvpbuZseArqmLqGLQZQJjVE9vJzaSkQ==
|
||||
dependencies:
|
||||
"@chakra-ui/alert" "2.0.17"
|
||||
"@chakra-ui/alert" "2.1.0"
|
||||
"@chakra-ui/close-button" "2.0.17"
|
||||
"@chakra-ui/portal" "2.0.15"
|
||||
"@chakra-ui/react-context" "2.0.7"
|
||||
"@chakra-ui/portal" "2.0.16"
|
||||
"@chakra-ui/react-context" "2.0.8"
|
||||
"@chakra-ui/react-use-timeout" "2.0.5"
|
||||
"@chakra-ui/react-use-update-effect" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
"@chakra-ui/styled-system" "2.6.1"
|
||||
"@chakra-ui/theme" "2.2.5"
|
||||
"@chakra-ui/styled-system" "2.8.0"
|
||||
"@chakra-ui/theme" "3.0.1"
|
||||
|
||||
"@chakra-ui/tooltip@2.2.6":
|
||||
version "2.2.6"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tooltip/-/tooltip-2.2.6.tgz#a38f9ff2dd8a574c8cf49526c3846533455f8ddd"
|
||||
integrity sha512-4cbneidZ5+HCWge3OZzewRQieIvhDjSsl+scrl4Scx7E0z3OmqlTIESU5nGIZDBLYqKn/UirEZhqaQ33FOS2fw==
|
||||
"@chakra-ui/tooltip@2.2.7":
|
||||
version "2.2.7"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/tooltip/-/tooltip-2.2.7.tgz#7c305efb057a5fe4694b1b8d82395aec776d8f57"
|
||||
integrity sha512-ImUJ6NnVqARaYqpgtO+kzucDRmxo8AF3jMjARw0bx2LxUkKwgRCOEaaRK5p5dHc0Kr6t5/XqjDeUNa19/sLauA==
|
||||
dependencies:
|
||||
"@chakra-ui/popper" "3.0.13"
|
||||
"@chakra-ui/portal" "2.0.15"
|
||||
"@chakra-ui/portal" "2.0.16"
|
||||
"@chakra-ui/react-types" "2.0.7"
|
||||
"@chakra-ui/react-use-disclosure" "2.0.8"
|
||||
"@chakra-ui/react-use-event-listener" "2.0.7"
|
||||
"@chakra-ui/react-use-merge-refs" "2.0.7"
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
"@chakra-ui/transition@2.0.15":
|
||||
version "2.0.15"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/transition/-/transition-2.0.15.tgz#c640df2ea82f5ad58c55a6e1a7c338f377cb96d8"
|
||||
integrity sha512-o9LBK/llQfUDHF/Ty3cQ6nShpekKTqHUoJlUOzNKhoTsNpoRerr9v0jwojrX1YI02KtVjfhFU6PiqXlDfREoNw==
|
||||
"@chakra-ui/transition@2.0.16":
|
||||
version "2.0.16"
|
||||
resolved "https://registry.yarnpkg.com/@chakra-ui/transition/-/transition-2.0.16.tgz#498c91e6835bb5d950fd1d1402f483b85f7dcd87"
|
||||
integrity sha512-E+RkwlPc3H7P1crEXmXwDXMB2lqY2LLia2P5siQ4IEnRWIgZXlIw+8Em+NtHNgusel2N+9yuB0wT9SeZZeZ3CQ==
|
||||
dependencies:
|
||||
"@chakra-ui/shared-utils" "2.0.5"
|
||||
|
||||
@@ -1081,6 +1081,18 @@
|
||||
resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.16.17.tgz#c5a1a4bfe1b57f0c3e61b29883525c6da3e5c091"
|
||||
integrity sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==
|
||||
|
||||
"@eslint-community/eslint-utils@^4.2.0":
|
||||
version "4.4.0"
|
||||
resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
|
||||
integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==
|
||||
dependencies:
|
||||
eslint-visitor-keys "^3.3.0"
|
||||
|
||||
"@eslint-community/regexpp@^4.4.0":
|
||||
version "4.5.0"
|
||||
resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.5.0.tgz#f6f729b02feee2c749f57e334b7a1b5f40a81724"
|
||||
integrity sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==
|
||||
|
||||
"@eslint/eslintrc@^1.4.1":
|
||||
version "1.4.1"
|
||||
resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-1.4.1.tgz#af58772019a2d271b7e2d4c23ff4ddcba3ccfb3e"
|
||||
@@ -1768,47 +1780,47 @@
|
||||
resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.0.tgz#53ef263e5239728b56096b0a869595135b7952d2"
|
||||
integrity sha512-kr90f+ERiQtKWMz5rP32ltJ/BtULDI5RVO0uavn1HQUOwjx0R1h0rnDYNL0CepF1zL5bSY6FISAfd9tOdDhU5Q==
|
||||
|
||||
"@typescript-eslint/eslint-plugin@^5.52.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.52.0.tgz#5fb0d43574c2411f16ea80f5fc335b8eaa7b28a8"
|
||||
integrity sha512-lHazYdvYVsBokwCdKOppvYJKaJ4S41CgKBcPvyd0xjZNbvQdhn/pnJlGtQksQ/NhInzdaeaSarlBjDXHuclEbg==
|
||||
"@typescript-eslint/eslint-plugin@^5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.57.0.tgz#52c8a7a4512f10e7249ca1e2e61f81c62c34365c"
|
||||
integrity sha512-itag0qpN6q2UMM6Xgk6xoHa0D0/P+M17THnr4SVgqn9Rgam5k/He33MA7/D7QoJcdMxHFyX7U9imaBonAX/6qA==
|
||||
dependencies:
|
||||
"@typescript-eslint/scope-manager" "5.52.0"
|
||||
"@typescript-eslint/type-utils" "5.52.0"
|
||||
"@typescript-eslint/utils" "5.52.0"
|
||||
"@eslint-community/regexpp" "^4.4.0"
|
||||
"@typescript-eslint/scope-manager" "5.57.0"
|
||||
"@typescript-eslint/type-utils" "5.57.0"
|
||||
"@typescript-eslint/utils" "5.57.0"
|
||||
debug "^4.3.4"
|
||||
grapheme-splitter "^1.0.4"
|
||||
ignore "^5.2.0"
|
||||
natural-compare-lite "^1.4.0"
|
||||
regexpp "^3.2.0"
|
||||
semver "^7.3.7"
|
||||
tsutils "^3.21.0"
|
||||
|
||||
"@typescript-eslint/parser@^5.52.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.52.0.tgz#73c136df6c0133f1d7870de7131ccf356f5be5a4"
|
||||
integrity sha512-e2KiLQOZRo4Y0D/b+3y08i3jsekoSkOYStROYmPUnGMEoA0h+k2qOH5H6tcjIc68WDvGwH+PaOrP1XRzLJ6QlA==
|
||||
"@typescript-eslint/parser@^5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.57.0.tgz#f675bf2cd1a838949fd0de5683834417b757e4fa"
|
||||
integrity sha512-orrduvpWYkgLCyAdNtR1QIWovcNZlEm6yL8nwH/eTxWLd8gsP+25pdLHYzL2QdkqrieaDwLpytHqycncv0woUQ==
|
||||
dependencies:
|
||||
"@typescript-eslint/scope-manager" "5.52.0"
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
"@typescript-eslint/typescript-estree" "5.52.0"
|
||||
"@typescript-eslint/scope-manager" "5.57.0"
|
||||
"@typescript-eslint/types" "5.57.0"
|
||||
"@typescript-eslint/typescript-estree" "5.57.0"
|
||||
debug "^4.3.4"
|
||||
|
||||
"@typescript-eslint/scope-manager@5.52.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.52.0.tgz#a993d89a0556ea16811db48eabd7c5b72dcb83d1"
|
||||
integrity sha512-AR7sxxfBKiNV0FWBSARxM8DmNxrwgnYMPwmpkC1Pl1n+eT8/I2NAUPuwDy/FmDcC6F8pBfmOcaxcxRHspgOBMw==
|
||||
"@typescript-eslint/scope-manager@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.57.0.tgz#79ccd3fa7bde0758059172d44239e871e087ea36"
|
||||
integrity sha512-NANBNOQvllPlizl9LatX8+MHi7bx7WGIWYjPHDmQe5Si/0YEYfxSljJpoTyTWFTgRy3X8gLYSE4xQ2U+aCozSw==
|
||||
dependencies:
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
"@typescript-eslint/visitor-keys" "5.52.0"
|
||||
"@typescript-eslint/types" "5.57.0"
|
||||
"@typescript-eslint/visitor-keys" "5.57.0"
|
||||
|
||||
"@typescript-eslint/type-utils@5.52.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-5.52.0.tgz#9fd28cd02e6f21f5109e35496df41893f33167aa"
|
||||
integrity sha512-tEKuUHfDOv852QGlpPtB3lHOoig5pyFQN/cUiZtpw99D93nEBjexRLre5sQZlkMoHry/lZr8qDAt2oAHLKA6Jw==
|
||||
"@typescript-eslint/type-utils@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-5.57.0.tgz#98e7531c4e927855d45bd362de922a619b4319f2"
|
||||
integrity sha512-kxXoq9zOTbvqzLbdNKy1yFrxLC6GDJFE2Yuo3KqSwTmDOFjUGeWSakgoXT864WcK5/NAJkkONCiKb1ddsqhLXQ==
|
||||
dependencies:
|
||||
"@typescript-eslint/typescript-estree" "5.52.0"
|
||||
"@typescript-eslint/utils" "5.52.0"
|
||||
"@typescript-eslint/typescript-estree" "5.57.0"
|
||||
"@typescript-eslint/utils" "5.57.0"
|
||||
debug "^4.3.4"
|
||||
tsutils "^3.21.0"
|
||||
|
||||
@@ -1822,13 +1834,18 @@
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.52.0.tgz#19e9abc6afb5bd37a1a9bea877a1a836c0b3241b"
|
||||
integrity sha512-oV7XU4CHYfBhk78fS7tkum+/Dpgsfi91IIDy7fjCyq2k6KB63M6gMC0YIvy+iABzmXThCRI6xpCEyVObBdWSDQ==
|
||||
|
||||
"@typescript-eslint/typescript-estree@5.52.0", "@typescript-eslint/typescript-estree@^5.13.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.52.0.tgz#6408cb3c2ccc01c03c278cb201cf07e73347dfca"
|
||||
integrity sha512-WeWnjanyEwt6+fVrSR0MYgEpUAuROxuAH516WPjUblIrClzYJj0kBbjdnbQXLpgAN8qbEuGywiQsXUVDiAoEuQ==
|
||||
"@typescript-eslint/types@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.57.0.tgz#727bfa2b64c73a4376264379cf1f447998eaa132"
|
||||
integrity sha512-mxsod+aZRSyLT+jiqHw1KK6xrANm19/+VFALVFP5qa/aiJnlP38qpyaTd0fEKhWvQk6YeNZ5LGwI1pDpBRBhtQ==
|
||||
|
||||
"@typescript-eslint/typescript-estree@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.57.0.tgz#ebcd0ee3e1d6230e888d88cddf654252d41e2e40"
|
||||
integrity sha512-LTzQ23TV82KpO8HPnWuxM2V7ieXW8O142I7hQTxWIHDcCEIjtkat6H96PFkYBQqGFLW/G/eVVOB9Z8rcvdY/Vw==
|
||||
dependencies:
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
"@typescript-eslint/visitor-keys" "5.52.0"
|
||||
"@typescript-eslint/types" "5.57.0"
|
||||
"@typescript-eslint/visitor-keys" "5.57.0"
|
||||
debug "^4.3.4"
|
||||
globby "^11.1.0"
|
||||
is-glob "^4.0.3"
|
||||
@@ -1848,18 +1865,31 @@
|
||||
semver "^7.3.5"
|
||||
tsutils "^3.21.0"
|
||||
|
||||
"@typescript-eslint/utils@5.52.0":
|
||||
"@typescript-eslint/typescript-estree@^5.13.0":
|
||||
version "5.52.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.52.0.tgz#b260bb5a8f6b00a0ed51db66bdba4ed5e4845a72"
|
||||
integrity sha512-As3lChhrbwWQLNk2HC8Ree96hldKIqk98EYvypd3It8Q1f8d5zWyIoaZEp2va5667M4ZyE7X8UUR+azXrFl+NA==
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.52.0.tgz#6408cb3c2ccc01c03c278cb201cf07e73347dfca"
|
||||
integrity sha512-WeWnjanyEwt6+fVrSR0MYgEpUAuROxuAH516WPjUblIrClzYJj0kBbjdnbQXLpgAN8qbEuGywiQsXUVDiAoEuQ==
|
||||
dependencies:
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
"@typescript-eslint/visitor-keys" "5.52.0"
|
||||
debug "^4.3.4"
|
||||
globby "^11.1.0"
|
||||
is-glob "^4.0.3"
|
||||
semver "^7.3.7"
|
||||
tsutils "^3.21.0"
|
||||
|
||||
"@typescript-eslint/utils@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.57.0.tgz#eab8f6563a2ac31f60f3e7024b91bf75f43ecef6"
|
||||
integrity sha512-ps/4WohXV7C+LTSgAL5CApxvxbMkl9B9AUZRtnEFonpIxZDIT7wC1xfvuJONMidrkB9scs4zhtRyIwHh4+18kw==
|
||||
dependencies:
|
||||
"@eslint-community/eslint-utils" "^4.2.0"
|
||||
"@types/json-schema" "^7.0.9"
|
||||
"@types/semver" "^7.3.12"
|
||||
"@typescript-eslint/scope-manager" "5.52.0"
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
"@typescript-eslint/typescript-estree" "5.52.0"
|
||||
"@typescript-eslint/scope-manager" "5.57.0"
|
||||
"@typescript-eslint/types" "5.57.0"
|
||||
"@typescript-eslint/typescript-estree" "5.57.0"
|
||||
eslint-scope "^5.1.1"
|
||||
eslint-utils "^3.0.0"
|
||||
semver "^7.3.7"
|
||||
|
||||
"@typescript-eslint/visitor-keys@4.33.0":
|
||||
@@ -1878,6 +1908,14 @@
|
||||
"@typescript-eslint/types" "5.52.0"
|
||||
eslint-visitor-keys "^3.3.0"
|
||||
|
||||
"@typescript-eslint/visitor-keys@5.57.0":
|
||||
version "5.57.0"
|
||||
resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.57.0.tgz#e2b2f4174aff1d15eef887ce3d019ecc2d7a8ac1"
|
||||
integrity sha512-ery2g3k0hv5BLiKpPuwYt9KBkAp2ugT6VvyShXdLOkax895EC55sP0Tx5L0fZaQueiK3fBLvHVvEl3jFS5ia+g==
|
||||
dependencies:
|
||||
"@typescript-eslint/types" "5.57.0"
|
||||
eslint-visitor-keys "^3.3.0"
|
||||
|
||||
"@vitejs/plugin-react-swc@^3.2.0":
|
||||
version "3.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vitejs/plugin-react-swc/-/plugin-react-swc-3.2.0.tgz#7c4f6e116a296c27f680d05750f9dbf798cf7709"
|
||||
@@ -1890,15 +1928,15 @@
|
||||
resolved "https://registry.yarnpkg.com/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz#e77a97fbd345b76d83245edcd17d393b1b41fb31"
|
||||
integrity sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==
|
||||
|
||||
"@zag-js/element-size@0.3.1":
|
||||
version "0.3.1"
|
||||
resolved "https://registry.yarnpkg.com/@zag-js/element-size/-/element-size-0.3.1.tgz#f9f6ae98355e2250d18d0f6e2f1134a0ae4c6a2f"
|
||||
integrity sha512-jR5j4G//bRzcxwAACWi9EfITnwjNmn10LxF4NmALrdZU7/PNWP3uUCdhCxd/0SCyeiJXUl0yvD57rWAbKPs1nw==
|
||||
"@zag-js/element-size@0.3.2":
|
||||
version "0.3.2"
|
||||
resolved "https://registry.yarnpkg.com/@zag-js/element-size/-/element-size-0.3.2.tgz#ebb76af2a024230482406db41344598d1a9f54f4"
|
||||
integrity sha512-bVvvigUGvAuj7PCkE5AbzvTJDTw5f3bg9nQdv+ErhVN8SfPPppLJEmmWdxqsRzrHXgx8ypJt/+Ty0kjtISVDsQ==
|
||||
|
||||
"@zag-js/focus-visible@0.2.1":
|
||||
version "0.2.1"
|
||||
resolved "https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.2.1.tgz#bf4f1009f4fd35a9728dfaa9214d8cb318fe8b1e"
|
||||
integrity sha512-19uTjoZGP4/Ax7kSNhhay9JA83BirKzpqLkeEAilrpdI1hE5xuq6q+tzJOsrMOOqJrm7LkmZp5lbsTQzvK2pYg==
|
||||
"@zag-js/focus-visible@0.2.2":
|
||||
version "0.2.2"
|
||||
resolved "https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.2.2.tgz#56233480ca1275d3218fb2e10696a33d1a6b9e64"
|
||||
integrity sha512-0j2gZq8HiZ51z4zNnSkF1iSkqlwRDvdH+son3wHdoz+7IUdMN/5Exd4TxMJ+gq2Of1DiXReYLL9qqh2PdQ4wgA==
|
||||
|
||||
accepts@~1.3.4:
|
||||
version "1.3.8"
|
||||
@@ -4510,10 +4548,10 @@ react-dropzone@^14.2.3:
|
||||
file-selector "^0.6.0"
|
||||
prop-types "^15.8.1"
|
||||
|
||||
react-fast-compare@3.2.0:
|
||||
version "3.2.0"
|
||||
resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz#641a9da81b6a6320f270e89724fb45a0b39e43bb"
|
||||
integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==
|
||||
react-fast-compare@3.2.1:
|
||||
version "3.2.1"
|
||||
resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.1.tgz#53933d9e14f364281d6cba24bfed7a4afb808b5f"
|
||||
integrity sha512-xTYf9zFim2pEif/Fw16dBiXpe0hoy5PxcD8+OwBnTtNLfIm3g6WxhKNurY+6OmdH1u6Ta/W/Vl6vjbYP1MFnDg==
|
||||
|
||||
react-fast-compare@^2.0.1:
|
||||
version "2.0.4"
|
||||
@@ -5311,6 +5349,11 @@ typescript@^4.0.0, typescript@^4.5.5:
|
||||
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a"
|
||||
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
|
||||
|
||||
typescript@^5.0.3:
|
||||
version "5.0.3"
|
||||
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.0.3.tgz#fe976f0c826a88d0a382007681cbb2da44afdedf"
|
||||
integrity sha512-xv8mOEDnigb/tN9PSMTwSEqAnUvkoXMQlicOb0IUVDBSQCgBSaAAROUZYy2IcUy5qU6XajK5jjjO7TMWqBTKZA==
|
||||
|
||||
unbox-primitive@^1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"
|
||||
|
||||
@@ -22,6 +22,7 @@ import transformers
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from pathlib import Path
from PIL import Image, ImageOps
from pytorch_lightning import logging, seed_everything

@@ -528,6 +529,11 @@ class Generate:
log_tokens=self.log_tokenization,
)


# untested code, so commented out for now
# if self.model.peft_manager:
# self.model = self.model.peft_manager.load(self.model, self.model.unet.dtype)

init_image, mask_image = self._make_images(
init_img,
init_mask,
@@ -909,12 +915,8 @@ class Generate:
return self._load_generator(".omnibus", "Omnibus")

def _load_generator(self, module, class_name):
if self.is_legacy_model(self.model_name):
mn = f"ldm.invoke.ckpt_generator{module}"
cn = f"Ckpt{class_name}"
else:
mn = f"ldm.invoke.generator{module}"
cn = class_name
mn = f"ldm.invoke.generator{module}"
cn = class_name
module = importlib.import_module(mn)
constructor = getattr(module, cn)
return constructor(self.model, self.precision)
@@ -976,7 +978,7 @@ class Generate:
self.generators = {}

seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
if self.embedding_path is not None:
if self.embedding_path and not model_data.get("ti_embeddings_loaded"):
print(f'>> Loading embeddings from {self.embedding_path}')
for root, _, files in os.walk(self.embedding_path):
for name in files:
@@ -984,14 +986,24 @@ class Generate:
self.model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)
model_data["ti_embeddings_loaded"] = True
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)

self.model_name = model_name
self._set_sampler() # requires self.model_name to be set first
self._save_last_used_model(model_name)
return self.model

def _save_last_used_model(self,model_name:str):
"""
Save name of the last model used.
"""
model_file_path = Path(Globals.root,'.last_model')
with open(model_file_path,'w') as f:
f.write(model_name)

def load_huggingface_concepts(self, concepts: list[str]):
self.model.textual_inversion_manager.load_huggingface_concepts(concepts)

@@ -1032,6 +1044,8 @@ class Generate:
image_callback=None,
prefix=None,
):

results = []
for r in image_list:
image, seed = r
try:
@@ -1085,6 +1099,10 @@ class Generate:
else:
r[0] = image

results.append([image, seed])

return results

def apply_textmask(
self, image_path: str, prompt: str, callback, threshold: float = 0.5
):
@@ -1113,9 +1131,6 @@ class Generate:
def sample_to_lowres_estimated_image(self, samples):
return self._make_base().sample_to_lowres_estimated_image(samples)

def is_legacy_model(self, model_name) -> bool:
return self.model_manager.is_legacy(model_name)

def _set_sampler(self):
if isinstance(self.model, DiffusionPipeline):
return self._set_scheduler()

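The generate.py hunks above and the CLI changes below add a simple "last used model" memory: _save_last_used_model() writes the model name to a .last_model file under the InvokeAI root, and retrieve_last_used_model() reads it back at the next launch so the CLI can default to it. A rough, self-contained sketch of that round-trip (illustrative only; the real code uses Globals.root, which is replaced here by an explicit root_dir argument):

from pathlib import Path
from typing import Optional

def save_last_used_model(root_dir: Path, model_name: str) -> None:
    # record the model name so the next launch can default to it
    (root_dir / ".last_model").write_text(model_name)

def retrieve_last_used_model(root_dir: Path) -> Optional[str]:
    # return the recorded name, or None if nothing has been saved yet
    path = root_dir / ".last_model"
    if not path.exists():
        return None
    return path.read_text().strip() or None
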
@@ -4,6 +4,7 @@ import shlex
|
||||
import sys
|
||||
import traceback
|
||||
from argparse import Namespace
|
||||
from packaging import version
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
@@ -16,6 +17,8 @@ if sys.platform == "darwin":
|
||||
|
||||
import pyparsing # type: ignore
|
||||
|
||||
print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
|
||||
|
||||
import ldm.invoke
|
||||
|
||||
from ..generate import Generate
|
||||
@@ -30,13 +33,21 @@ from .pngwriter import PngWriter, retrieve_metadata, write_metadata
|
||||
from .readline import Completer, get_completer
|
||||
from ..util import url_attachment_name
|
||||
|
||||
print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
|
||||
|
||||
# global used in multiple functions (fix)
|
||||
infile = None
|
||||
|
||||
def main():
|
||||
"""Initialize command-line parsers and the diffusion model"""
|
||||
global infile
|
||||
|
||||
|
||||
print('DEBUG: [3] Entered main()', file=sys.stderr)
|
||||
print('DEBUG: INVOKEAI ENVIRONMENT:')
|
||||
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
|
||||
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
|
||||
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
|
||||
|
||||
opt = Args()
|
||||
args = opt.parse_args()
|
||||
if not args:
|
||||
@@ -63,11 +74,15 @@ def main():
|
||||
Globals.internet_available = args.internet_available and check_internet()
|
||||
Globals.disable_xformers = not args.xformers
|
||||
Globals.sequential_guidance = args.sequential_guidance
|
||||
Globals.ckpt_convert = args.ckpt_convert
|
||||
Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
|
||||
|
||||
print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
|
||||
|
||||
# run any post-install patches needed
|
||||
run_patches()
|
||||
|
||||
print(f'DEBUG: [5] Patches run', file=sys.stderr)
|
||||
|
||||
print(f">> Internet connectivity is {Globals.internet_available}")
|
||||
|
||||
if not args.conf:
|
||||
@@ -83,8 +98,9 @@ def main():
|
||||
# loading here to avoid long delays on startup
|
||||
# these two lines prevent a horrible warning message from appearing
|
||||
# when the frozen CLIP tokenizer is imported
|
||||
print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
|
||||
|
||||
import transformers # type: ignore
|
||||
|
||||
from ldm.generate import Generate
|
||||
|
||||
transformers.logging.set_verbosity_error()
|
||||
@@ -92,6 +108,7 @@ def main():
|
||||
|
||||
diffusers.logging.set_verbosity_error()
|
||||
|
||||
print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
|
||||
# Loading Face Restoration and ESRGAN Modules
|
||||
gfpgan, codeformer, esrgan = load_face_restoration(opt)
|
||||
|
||||
@@ -109,7 +126,11 @@ def main():
|
||||
else:
|
||||
embedding_path = None
|
||||
|
||||
if opt.lora_path:
|
||||
Globals.lora_models_dir = opt.lora_path
|
||||
|
||||
# migrate legacy models
|
||||
print(f'DEBUG: [8] migrating models', file=sys.stderr)
|
||||
ModelManager.migrate_models()
|
||||
|
||||
# load the infile as a list of lines
|
||||
@@ -125,11 +146,14 @@ def main():
|
||||
print(f"{e}. Aborting.")
|
||||
sys.exit(-1)
|
||||
|
||||
model = opt.model or retrieve_last_used_model()
|
||||
|
||||
print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
|
||||
# creating a Generate object:
|
||||
try:
|
||||
gen = Generate(
|
||||
conf=opt.conf,
|
||||
model=opt.model,
|
||||
model=model,
|
||||
sampler_name=opt.sampler_name,
|
||||
embedding_path=embedding_path,
|
||||
full_precision=opt.full_precision,
|
||||
@@ -151,6 +175,7 @@ def main():
|
||||
print(">> changed to seamless tiling mode")
|
||||
|
||||
# preload the model
|
||||
print(f'DEBUG: [10] Loading default model', file=sys.stderr)
|
||||
try:
|
||||
gen.load_model()
|
||||
except KeyError:
|
||||
@@ -165,14 +190,13 @@ def main():
|
||||
if path := opt.autoimport:
|
||||
gen.model_manager.heuristic_import(
|
||||
str(path),
|
||||
convert=False,
|
||||
commit_to_conf=opt.conf,
|
||||
config_file_callback=lambda x: _pick_configuration_file(completer,x),
|
||||
)
|
||||
|
||||
if path := opt.autoconvert:
|
||||
gen.model_manager.heuristic_import(
|
||||
str(path), convert=True, commit_to_conf=opt.conf
|
||||
str(path), commit_to_conf=opt.conf
|
||||
)
|
||||
|
||||
# web server loops forever
|
||||
@@ -199,6 +223,7 @@ def main():
|
||||
# TODO: main_loop() has gotten busy. Needs to be refactored.
|
||||
def main_loop(gen, opt, completer):
|
||||
"""prompt/read/execute loop"""
|
||||
print(f'DEBUG: [11] In main loop', file=sys.stderr)
|
||||
global infile
|
||||
done = False
|
||||
doneAfterInFile = infile is not None
|
||||
@@ -634,7 +659,7 @@ def set_default_output_dir(opt: Args, completer: Completer):
|
||||
completer.set_default_dir(opt.outdir)
|
||||
|
||||
|
||||
def import_model(model_path: str, gen, opt, completer, convert=False):
def import_model(model_path: str, gen, opt, completer):
"""
model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path;
(3) a huggingface repository id; or (4) a local directory containing a

@@ -665,7 +690,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
model_path,
model_name=model_name,
description=model_desc,
convert=convert,
config_file_callback=lambda x: _pick_configuration_file(completer,x),
)
if not imported_name:

@@ -771,14 +795,10 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
original_config_file = Path(model_info["config"])
model_name = model_name_or_path
model_description = model_info["description"]
vae = model_info["vae"]
vae_path = model_info.get("vae")
else:
print(f"** {model_name_or_path} is not a legacy .ckpt weights file")
return
if vae_repo := ldm.invoke.model_manager.VAE_TO_REPO_ID.get(Path(vae).stem):
vae_repo = dict(repo_id=vae_repo)
else:
vae_repo = None
model_name = manager.convert_and_import(
ckpt_path,
diffusers_path=Path(

@@ -787,11 +807,11 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
model_name=model_name,
model_description=model_description,
original_config_file=original_config_file,
vae=vae_repo,
vae_path=vae_path,
)
else:
try:
import_model(model_name_or_path, gen, opt, completer, convert=True)
import_model(model_name_or_path, gen, opt, completer)
except KeyboardInterrupt:
return

@@ -833,6 +853,7 @@ def edit_model(model_name: str, gen, opt, completer):
print(f"\n>> Editing model {model_name} from configuration file {opt.conf}")
new_name = _get_model_name(manager.list_models(), completer, model_name)
completer.complete_extensions(('.yaml','.ckpt','.safetensors','.pt'))
for attribute in info.keys():
if type(info[attribute]) != str:
continue

@@ -840,6 +861,7 @@ def edit_model(model_name: str, gen, opt, completer):
continue
completer.set_line(info[attribute])
info[attribute] = input(f"{attribute}: ") or info[attribute]
completer.complete_extensions(None)
if info["format"] == "diffusers":
vae = info.get("vae", dict(repo_id=None, path=None, subfolder=None))
@@ -1286,20 +1308,75 @@ def check_internet() -> bool:
except:
return False

def retrieve_last_used_model()->str:
"""
Return name of the last model used.
"""
model_file_path = Path(Globals.root,'.last_model')
if not model_file_path.exists():
return None
with open(model_file_path,'r') as f:
return f.readline()

# This routine performs any patch-ups needed after installation
def run_patches():
# install ckpt configuration files that may have been added to the
# distro after original root directory configuration
import invokeai.configs as conf
from shutil import copyfile
install_missing_config_files()
version_file = Path(Globals.root,'.version')
if version_file.exists():
with open(version_file,'r') as f:
root_version = version.parse(f.readline() or 'v2.3.2')
else:
root_version = version.parse('v2.3.2')
app_version = version.parse(ldm.invoke.__version__)
if root_version < app_version:
try:
do_version_update(root_version, ldm.invoke.__version__)
with open(version_file,'w') as f:
f.write(ldm.invoke.__version__)
except:
print("** Update failed. Will try again on next launch")

def install_missing_config_files():
"""
install ckpt configuration files that may have been added to the
distro after original root directory configuration
"""
pass
# import invokeai.configs as conf
# from shutil import copyfile
root_configs = Path(global_config_dir(), 'stable-diffusion')
repo_configs = Path(conf.__path__[0], 'stable-diffusion')
for src in repo_configs.iterdir():
dest = root_configs / src.name
if not dest.exists():
copyfile(src,dest)
# root_configs = Path(global_config_dir(), 'stable-diffusion')
# repo_configs = Path(conf.__path__[0], 'stable-diffusion')
# for src in repo_configs.iterdir():
# dest = root_configs / src.name
# if not dest.exists():
# copyfile(src,dest)

def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
"""
Make any updates to the launcher .sh and .bat scripts that may be needed
from release to release. This is not an elegant solution. Instead, the
launcher should be moved into the source tree and installed using pip.
"""
if root_version < version.Version('v2.3.4'):
dest = Path(Globals.root,'loras')
dest.mkdir(exist_ok=True)
if root_version < version.Version('v2.3.3'):
if sys.platform == "linux":
print('>> Downloading new version of launcher script and its config file')
from ldm.util import download_with_progress_bar
url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
dest = Path(Globals.root,'invoke.sh.in')
assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
dest.replace(Path(Globals.root,'invoke.sh'))
os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
dest = Path(Globals.root,'dialogrc')
assert download_with_progress_bar(url_base+'dialogrc',dest)
dest.replace(Path(Globals.root,'.dialogrc'))

if __name__ == '__main__':
main()
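A minimal sketch of the version-gated patching pattern that run_patches() implements above; it assumes `version` is `packaging.version`, and the helper name below is illustrative rather than part of the codebase:

from pathlib import Path
from packaging import version

def needs_patching(root: Path, app_version: str) -> bool:
    # Roots that predate version tracking are treated as v2.3.2, mirroring run_patches().
    version_file = root / '.version'
    root_version = version.parse('v2.3.2')
    if version_file.exists():
        root_version = version.parse(version_file.read_text().strip() or 'v2.3.2')
    return root_version < version.parse(app_version)

# e.g. needs_patching(Path('~/invokeai').expanduser(), '2.3.4') is True for a root last patched at v2.3.2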
@@ -1,2 +1 @@
__version__='2.3.3-rc1'
__version__='2.3.4'
@@ -522,8 +522,8 @@ class Args(object):
'--ckpt_convert',
action=argparse.BooleanOptionalAction,
dest='ckpt_convert',
default=False,
help='Load legacy ckpt files as diffusers. Pass --no-ckpt-convert to inhibit this behavior',
default=True,
help='Deprecated option. Legacy ckpt files are now always converted to diffusers when loaded.'
)
model_group.add_argument(
'--internet',

@@ -654,6 +654,13 @@ class Args(object):
type=str,
help='Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)'
)
render_group.add_argument(
'--lora_directory',
dest='lora_path',
default='loras',
type=str,
help='Path to a directory containing LoRA files; subdirectories are not supported. (default is ROOTDIR/loras)'
)
render_group.add_argument(
'--embeddings',
action=argparse.BooleanOptionalAction,

@@ -791,12 +798,12 @@ class Args(object):
*Model manipulation*
!models -- list models in configs/models.yaml
!switch <model_name> -- switch to model named <model_name>
!import_model /path/to/weights/file.ckpt -- adds a .ckpt model to your config
!import_model /path/to/weights/file -- imports a model from a ckpt or safetensors file
!import_model /path/to/weights/ -- interactively import models from a directory
!import_model http://path_to_model.ckpt -- downloads and adds a .ckpt model to your config
!import_model hakurei/waifu-diffusion -- downloads and adds a diffusers model to your config
!optimize_model <model_name> -- converts a .ckpt model to a diffusers model
!convert_model /path/to/weights/file.ckpt -- converts a .ckpt file path to a diffusers model
!import_model http://path_to_model -- downloads and adds a ckpt or safetensors model to your config
!import_model hakurei/waifu-diffusion -- downloads and adds a diffusers model to your config using its repo_id
!optimize_model <model_name> -- converts a .ckpt/.safetensors model to a diffusers model
!convert_model /path/to/weights/file -- converts a .ckpt file path to a diffusers model and adds to your config
!edit_model <model_name> -- edit a model's description
!del_model <model_name> -- delete a model
"""
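A short sketch of how argparse.BooleanOptionalAction (Python 3.9+) behaves for the deprecated --ckpt_convert flag above; the parser here is illustrative and not the real Args class:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--ckpt_convert',
    action=argparse.BooleanOptionalAction,   # also registers a --no-ckpt_convert negation
    dest='ckpt_convert',
    default=True,
    help='Deprecated; legacy ckpt files are now always converted to diffusers.',
)
print(parser.parse_args([]).ckpt_convert)                     # True (the new default)
print(parser.parse_args(['--no-ckpt_convert']).ckpt_convert)  # False (still parses, though the option is deprecated)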
@@ -1,4 +0,0 @@
|
||||
'''
|
||||
Initialization file for the ldm.invoke.generator package
|
||||
'''
|
||||
from .base import CkptGenerator
|
||||
@@ -1,335 +0,0 @@
|
||||
'''
|
||||
Base class for ldm.invoke.ckpt_generator.*
|
||||
including img2img, txt2img, and inpaint
|
||||
|
||||
THESE MODULES ARE TRANSITIONAL AND WILL BE REMOVED AT A FUTURE DATE
|
||||
WHEN LEGACY CKPT MODEL SUPPORT IS DISCONTINUED.
|
||||
'''
|
||||
import torch
|
||||
import numpy as np
|
||||
import random
|
||||
import os
|
||||
import os.path as osp
|
||||
import traceback
|
||||
from tqdm import tqdm, trange
|
||||
from PIL import Image, ImageFilter, ImageChops
|
||||
import cv2 as cv
|
||||
from einops import rearrange, repeat
|
||||
from pathlib import Path
|
||||
from pytorch_lightning import seed_everything
|
||||
import invokeai.assets.web as web_assets
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
|
||||
from ldm.util import rand_perlin_2d
|
||||
|
||||
downsampling = 8
|
||||
CAUTION_IMG = 'caution.png'
|
||||
|
||||
class CkptGenerator():
|
||||
def __init__(self, model, precision):
|
||||
self.model = model
|
||||
self.precision = precision
|
||||
self.seed = None
|
||||
self.latent_channels = model.channels
|
||||
self.downsampling_factor = downsampling # BUG: should come from model or config
|
||||
self.safety_checker = None
|
||||
self.perlin = 0.0
|
||||
self.threshold = 0
|
||||
self.variation_amount = 0
|
||||
self.with_variations = []
|
||||
self.use_mps_noise = False
|
||||
self.free_gpu_mem = None
|
||||
self.caution_img = None
|
||||
|
||||
# this is going to be overridden in img2img.py, txt2img.py and inpaint.py
|
||||
def get_make_image(self,prompt,**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and the initial image
|
||||
Return value depends on the seed at the time you call it
|
||||
"""
|
||||
raise NotImplementedError("image_iterator() must be implemented in a descendent class")
|
||||
|
||||
def set_variation(self, seed, variation_amount, with_variations):
|
||||
self.seed = seed
|
||||
self.variation_amount = variation_amount
|
||||
self.with_variations = with_variations
|
||||
|
||||
def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
|
||||
image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
|
||||
safety_checker:dict=None,
|
||||
attention_maps_callback = None,
|
||||
free_gpu_mem: bool=False,
|
||||
**kwargs):
|
||||
scope = choose_autocast(self.precision)
|
||||
self.safety_checker = safety_checker
|
||||
self.free_gpu_mem = free_gpu_mem
|
||||
attention_maps_images = []
|
||||
attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
|
||||
make_image = self.get_make_image(
|
||||
prompt,
|
||||
sampler = sampler,
|
||||
init_image = init_image,
|
||||
width = width,
|
||||
height = height,
|
||||
step_callback = step_callback,
|
||||
threshold = threshold,
|
||||
perlin = perlin,
|
||||
attention_maps_callback = attention_maps_callback,
|
||||
**kwargs
|
||||
)
|
||||
results = []
|
||||
seed = seed if seed is not None and seed >= 0 else self.new_seed()
|
||||
first_seed = seed
|
||||
seed, initial_noise = self.generate_initial_noise(seed, width, height)
|
||||
|
||||
# There used to be an additional self.model.ema_scope() here, but it breaks
|
||||
# the inpaint-1.5 model. Not sure what it did.... ?
|
||||
with scope(self.model.device.type):
|
||||
for n in trange(iterations, desc='Generating'):
|
||||
x_T = None
|
||||
if self.variation_amount > 0:
|
||||
seed_everything(seed)
|
||||
target_noise = self.get_noise(width,height)
|
||||
x_T = self.slerp(self.variation_amount, initial_noise, target_noise)
|
||||
elif initial_noise is not None:
|
||||
# i.e. we specified particular variations
|
||||
x_T = initial_noise
|
||||
else:
|
||||
seed_everything(seed)
|
||||
try:
|
||||
x_T = self.get_noise(width,height)
|
||||
except:
|
||||
print('** An error occurred while getting initial noise **')
|
||||
print(traceback.format_exc())
|
||||
|
||||
image = make_image(x_T)
|
||||
|
||||
if self.safety_checker is not None:
|
||||
image = self.safety_check(image)
|
||||
|
||||
results.append([image, seed])
|
||||
|
||||
if image_callback is not None:
|
||||
attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1]
|
||||
image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image)
|
||||
|
||||
seed = self.new_seed()
|
||||
|
||||
return results
|
||||
|
||||
def sample_to_image(self,samples)->Image.Image:
|
||||
"""
|
||||
Given samples returned from a sampler, converts
them into a PIL Image
|
||||
"""
|
||||
x_samples = self.model.decode_first_stage(samples)
|
||||
x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
|
||||
if len(x_samples) != 1:
|
||||
raise Exception(
|
||||
f'>> expected to get a single image, but got {len(x_samples)}')
|
||||
x_sample = 255.0 * rearrange(
|
||||
x_samples[0].cpu().numpy(), 'c h w -> h w c'
|
||||
)
|
||||
return Image.fromarray(x_sample.astype(np.uint8))
|
||||
|
||||
# write an approximate RGB image from latent samples for a single step to PNG
|
||||
|
||||
def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image:
|
||||
if init_image is None or init_mask is None:
|
||||
return result
|
||||
|
||||
# Get the original alpha channel of the mask if there is one.
|
||||
# Otherwise it is some other black/white image format ('1', 'L' or 'RGB')
|
||||
pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L')
|
||||
pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist
|
||||
|
||||
# Build an image with only visible pixels from source to use as reference for color-matching.
|
||||
init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8)
|
||||
init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8)
|
||||
init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8)
|
||||
|
||||
# Get numpy version of result
|
||||
np_image = np.asarray(result, dtype=np.uint8)
|
||||
|
||||
# Mask and calculate mean and standard deviation
|
||||
mask_pixels = init_a_pixels * init_mask_pixels > 0
|
||||
np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :]
|
||||
np_image_masked = np_image[mask_pixels, :]
|
||||
|
||||
if np_init_rgb_pixels_masked.size > 0:
|
||||
init_means = np_init_rgb_pixels_masked.mean(axis=0)
|
||||
init_std = np_init_rgb_pixels_masked.std(axis=0)
|
||||
gen_means = np_image_masked.mean(axis=0)
|
||||
gen_std = np_image_masked.std(axis=0)
|
||||
|
||||
# Color correct
|
||||
np_matched_result = np_image.copy()
|
||||
np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8)
|
||||
matched_result = Image.fromarray(np_matched_result, mode='RGB')
|
||||
else:
|
||||
matched_result = Image.fromarray(np_image, mode='RGB')
|
||||
|
||||
# Blur the mask out (into init image) by specified amount
|
||||
if mask_blur_radius > 0:
|
||||
nm = np.asarray(pil_init_mask, dtype=np.uint8)
|
||||
nmd = cv.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
|
||||
pmd = Image.fromarray(nmd, mode='L')
|
||||
blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))
|
||||
else:
|
||||
blurred_init_mask = pil_init_mask
|
||||
|
||||
multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])
|
||||
|
||||
# Paste original on color-corrected generation (using blurred mask)
|
||||
matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
|
||||
return matched_result
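# A self-contained sketch of the per-channel mean/std color transfer performed by
# repaste_and_color_correct() above; match_color() is illustrative and not part of
# this module, and only numpy is assumed.
import numpy as np

def match_color(generated: np.ndarray, reference: np.ndarray) -> np.ndarray:
    """Shift `generated` (H, W, 3 uint8) toward the color statistics of `reference`."""
    gen, ref = generated.astype(np.float32), reference.astype(np.float32)
    gen_mean, gen_std = gen.mean(axis=(0, 1)), gen.std(axis=(0, 1)) + 1e-6
    ref_mean, ref_std = ref.mean(axis=(0, 1)), ref.std(axis=(0, 1))
    return ((gen - gen_mean) / gen_std * ref_std + ref_mean).clip(0, 255).astype(np.uint8)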
|
||||
|
||||
|
||||
|
||||
def sample_to_lowres_estimated_image(self,samples):
|
||||
# originally adapted from code by @erucipe and @keturn here:
|
||||
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
|
||||
|
||||
# these updated numbers for v1.5 are from @torridgristle
|
||||
v1_5_latent_rgb_factors = torch.tensor([
|
||||
# R G B
|
||||
[ 0.3444, 0.1385, 0.0670], # L1
|
||||
[ 0.1247, 0.4027, 0.1494], # L2
|
||||
[-0.3192, 0.2513, 0.2103], # L3
|
||||
[-0.1307, -0.1874, -0.7445] # L4
|
||||
], dtype=samples.dtype, device=samples.device)
|
||||
|
||||
latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors
|
||||
latents_ubyte = (((latent_image + 1) / 2)
|
||||
.clamp(0, 1) # change scale from -1..1 to 0..1
|
||||
.mul(0xFF) # to 0..255
|
||||
.byte()).cpu()
|
||||
|
||||
return Image.fromarray(latents_ubyte.numpy())
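# Shape walk-through for the latent-to-RGB approximation above, assuming an
# SD v1.x latent of 4 channels at 64x64 (sizes are illustrative):
#   samples[0]                        -> (4, 64, 64)
#   .permute(1, 2, 0)                 -> (64, 64, 4)
#   @ v1_5_latent_rgb_factors (4, 3)  -> (64, 64, 3)  one approximate RGB triple per latent pixel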
|
||||
|
||||
def generate_initial_noise(self, seed, width, height):
|
||||
initial_noise = None
|
||||
if self.variation_amount > 0 or len(self.with_variations) > 0:
|
||||
# use fixed initial noise plus random noise per iteration
|
||||
seed_everything(seed)
|
||||
initial_noise = self.get_noise(width,height)
|
||||
for v_seed, v_weight in self.with_variations:
|
||||
seed = v_seed
|
||||
seed_everything(seed)
|
||||
next_noise = self.get_noise(width,height)
|
||||
initial_noise = self.slerp(v_weight, initial_noise, next_noise)
|
||||
if self.variation_amount > 0:
|
||||
random.seed() # reset RNG to an actually random state, so we can get a random seed for variations
|
||||
seed = random.randrange(0,np.iinfo(np.uint32).max)
|
||||
return (seed, initial_noise)
|
||||
else:
|
||||
return (seed, None)
|
||||
|
||||
# returns a tensor filled with random numbers from a normal distribution
|
||||
def get_noise(self,width,height):
|
||||
"""
|
||||
Returns a tensor filled with random numbers, either from a normal distribution
|
||||
(txt2img) or from the latent image (img2img, inpaint)
|
||||
"""
|
||||
raise NotImplementedError("get_noise() must be implemented in a descendent class")
|
||||
|
||||
def get_perlin_noise(self,width,height):
|
||||
fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
|
||||
return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
|
||||
|
||||
def new_seed(self):
|
||||
self.seed = random.randrange(0, np.iinfo(np.uint32).max)
|
||||
return self.seed
|
||||
|
||||
def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
|
||||
'''
|
||||
Spherical linear interpolation
|
||||
Args:
|
||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
||||
v0 (np.ndarray): Starting vector
|
||||
v1 (np.ndarray): Final vector
|
||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
||||
collinear. Not recommended to alter this.
|
||||
Returns:
|
||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
||||
'''
|
||||
inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v0 = v0.detach().cpu().numpy()
|
||||
if not isinstance(v1, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v1 = v1.detach().cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2 = torch.from_numpy(v2).to(self.model.device)
|
||||
|
||||
return v2
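# Usage sketch for slerp() above: blend two equal-shape noise tensors half way,
# as the variation logic in generate() does (`gen` stands in for a generator instance).
#   a = torch.randn(4, 64, 64)
#   b = torch.randn(4, 64, 64)
#   halfway = gen.slerp(0.5, a, b)   # same shape as the inputs, moved back to the model device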
|
||||
|
||||
def safety_check(self,image:Image.Image):
|
||||
'''
|
||||
If the CompViz safety checker flags an NSFW image, we
|
||||
blur it out.
|
||||
'''
|
||||
import diffusers
|
||||
|
||||
checker = self.safety_checker['checker']
|
||||
extractor = self.safety_checker['extractor']
|
||||
features = extractor([image], return_tensors="pt")
|
||||
features.to(self.model.device)
|
||||
|
||||
# unfortunately checker requires the numpy version, so we have to convert back
|
||||
x_image = np.array(image).astype(np.float32) / 255.0
|
||||
x_image = x_image[None].transpose(0, 3, 1, 2)
|
||||
|
||||
diffusers.logging.set_verbosity_error()
|
||||
checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values)
|
||||
if has_nsfw_concept[0]:
|
||||
print('** An image with potential non-safe content has been detected. A blurred image will be returned. **')
|
||||
return self.blur(image)
|
||||
else:
|
||||
return image
|
||||
|
||||
def blur(self,input):
|
||||
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
|
||||
try:
|
||||
caution = self.get_caution_img()
|
||||
if caution:
|
||||
blurry.paste(caution,(0,0),caution)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
return blurry
|
||||
|
||||
def get_caution_img(self):
|
||||
path = None
|
||||
if self.caution_img:
|
||||
return self.caution_img
|
||||
path = Path(web_assets.__path__[0]) / CAUTION_IMG
|
||||
caution = Image.open(path)
|
||||
self.caution_img = caution.resize((caution.width // 2, caution.height //2))
|
||||
return self.caution_img
|
||||
|
||||
# this is a handy routine for debugging use. Given a generated sample,
|
||||
# convert it into a PNG image and store it at the indicated path
|
||||
def save_sample(self, sample, filepath):
|
||||
image = self.sample_to_image(sample)
|
||||
dirname = os.path.dirname(filepath) or '.'
|
||||
if not os.path.exists(dirname):
|
||||
print(f'** creating directory {dirname}')
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
image.save(filepath,'PNG')
|
||||
|
||||
def torch_dtype(self)->torch.dtype:
|
||||
return torch.float16 if self.precision == 'float16' else torch.float32
|
||||
@@ -1,501 +0,0 @@
|
||||
'''
|
||||
ldm.invoke.ckpt_generator.embiggen descends from ldm.invoke.ckpt_generator
|
||||
and generates with ldm.invoke.ckpt_generator.img2img
|
||||
'''
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
from tqdm import trange
|
||||
from PIL import Image
|
||||
from ldm.invoke.ckpt_generator.base import CkptGenerator
|
||||
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.models.diffusion.ddim import DDIMSampler
|
||||
|
||||
class CkptEmbiggen(CkptGenerator):
|
||||
def __init__(self, model, precision):
|
||||
super().__init__(model, precision)
|
||||
self.init_latent = None
|
||||
|
||||
# Replace generate because Embiggen doesn't need/use most of what it does normally
|
||||
def generate(self,prompt,iterations=1,seed=None,
|
||||
image_callback=None, step_callback=None,
|
||||
**kwargs):
|
||||
|
||||
scope = choose_autocast(self.precision)
|
||||
make_image = self.get_make_image(
|
||||
prompt,
|
||||
step_callback = step_callback,
|
||||
**kwargs
|
||||
)
|
||||
results = []
|
||||
seed = seed if seed else self.new_seed()
|
||||
|
||||
# Noise will be generated by the Img2Img generator when called
|
||||
with scope(self.model.device.type), self.model.ema_scope():
|
||||
for n in trange(iterations, desc='Generating'):
|
||||
# make_image will call Img2Img which will do the equivalent of get_noise itself
|
||||
image = make_image()
|
||||
results.append([image, seed])
|
||||
if image_callback is not None:
|
||||
image_callback(image, seed, prompt_in=prompt)
|
||||
seed = self.new_seed()
|
||||
return results
|
||||
|
||||
@torch.no_grad()
|
||||
def get_make_image(
|
||||
self,
|
||||
prompt,
|
||||
sampler,
|
||||
steps,
|
||||
cfg_scale,
|
||||
ddim_eta,
|
||||
conditioning,
|
||||
init_img,
|
||||
strength,
|
||||
width,
|
||||
height,
|
||||
embiggen,
|
||||
embiggen_tiles,
|
||||
step_callback=None,
|
||||
**kwargs
|
||||
):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image
|
||||
Return value depends on the seed at the time you call it
|
||||
"""
|
||||
assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models"
|
||||
|
||||
# Construct embiggen arg array, and sanity check arguments
|
||||
if embiggen == None: # embiggen can also be called with just embiggen_tiles
|
||||
embiggen = [1.0] # If not specified, assume no scaling
|
||||
elif embiggen[0] < 0:
|
||||
embiggen[0] = 1.0
|
||||
print(
|
||||
'>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !')
|
||||
if len(embiggen) < 2:
|
||||
embiggen.append(0.75)
|
||||
elif embiggen[1] > 1.0 or embiggen[1] < 0:
|
||||
embiggen[1] = 0.75
|
||||
print('>> Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !')
|
||||
if len(embiggen) < 3:
|
||||
embiggen.append(0.25)
|
||||
elif embiggen[2] < 0:
|
||||
embiggen[2] = 0.25
|
||||
print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !')
|
||||
|
||||
# Convert tiles from their user-friendly count-from-one to count-from-zero, because we need to do modulo math
|
||||
# and then sort them, because... people.
|
||||
if embiggen_tiles:
|
||||
embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles))
|
||||
embiggen_tiles.sort()
|
||||
|
||||
if strength >= 0.5:
|
||||
print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.')
|
||||
|
||||
# Prep img2img generator, since we wrap over it
|
||||
gen_img2img = CkptImg2Img(self.model,self.precision)
|
||||
|
||||
# Open original init image (not a tensor) to manipulate
|
||||
initsuperimage = Image.open(init_img)
|
||||
|
||||
with Image.open(init_img) as img:
|
||||
initsuperimage = img.convert('RGB')
|
||||
|
||||
# Size of the target super init image in pixels
|
||||
initsuperwidth, initsuperheight = initsuperimage.size
|
||||
|
||||
# Increase by scaling factor if not already resized, using ESRGAN as able
|
||||
if embiggen[0] != 1.0:
|
||||
initsuperwidth = round(initsuperwidth*embiggen[0])
|
||||
initsuperheight = round(initsuperheight*embiggen[0])
|
||||
if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero
|
||||
from ldm.invoke.restoration.realesrgan import ESRGAN
|
||||
esrgan = ESRGAN()
|
||||
print(
|
||||
f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}')
|
||||
if embiggen[0] > 2:
|
||||
initsuperimage = esrgan.process(
|
||||
initsuperimage,
|
||||
embiggen[1], # upscale strength
|
||||
self.seed,
|
||||
4, # upscale scale
|
||||
)
|
||||
else:
|
||||
initsuperimage = esrgan.process(
|
||||
initsuperimage,
|
||||
embiggen[1], # upscale strength
|
||||
self.seed,
|
||||
2, # upscale scale
|
||||
)
|
||||
# We could keep recursively re-running ESRGAN for a requested embiggen[0] larger than 4x
|
||||
# but from personal experience it doesn't greatly improve anything after 4x
|
||||
# Resize to target scaling factor resolution
|
||||
initsuperimage = initsuperimage.resize(
|
||||
(initsuperwidth, initsuperheight), Image.Resampling.LANCZOS)
|
||||
|
||||
# Use width and height as tile widths and height
|
||||
# Determine buffer size in pixels
|
||||
if embiggen[2] < 1:
|
||||
if embiggen[2] < 0:
|
||||
embiggen[2] = 0
|
||||
overlap_size_x = round(embiggen[2] * width)
|
||||
overlap_size_y = round(embiggen[2] * height)
|
||||
else:
|
||||
overlap_size_x = round(embiggen[2])
|
||||
overlap_size_y = round(embiggen[2])
|
||||
|
||||
# With overall image width and height known, determine how many tiles we need
|
||||
def ceildiv(a, b):
|
||||
return -1 * (-a // b)
|
||||
|
||||
# X and Y need to be determined independently (we may have savings on one based on the buffer pixel count)
|
||||
# (initsuperwidth - width) is the area remaining to the right that we need to layers tiles to fill
|
||||
# (width - overlap_size_x) is how much new we can fill with a single tile
|
||||
emb_tiles_x = 1
|
||||
emb_tiles_y = 1
|
||||
if (initsuperwidth - width) > 0:
|
||||
emb_tiles_x = ceildiv(initsuperwidth - width,
|
||||
width - overlap_size_x) + 1
|
||||
if (initsuperheight - height) > 0:
|
||||
emb_tiles_y = ceildiv(initsuperheight - height,
|
||||
height - overlap_size_y) + 1
|
||||
# Sanity
|
||||
assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.'
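# Worked example of the tile-count math above (numbers are illustrative): for a
# scaled init image of 1536x1024, 512x512 tiles and an overlap ratio of 0.25,
# overlap_size_x = overlap_size_y = 128, so
#   emb_tiles_x = ceildiv(1536 - 512, 512 - 128) + 1 = ceildiv(1024, 384) + 1 = 4
#   emb_tiles_y = ceildiv(1024 - 512, 512 - 128) + 1 = ceildiv(512, 384) + 1 = 3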
|
||||
|
||||
# Prep alpha layers --------------
|
||||
# https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil
|
||||
# agradientL is Left-side transparent
|
||||
agradientL = Image.linear_gradient('L').rotate(
|
||||
90).resize((overlap_size_x, height))
|
||||
# agradientT is Top-side transparent
|
||||
agradientT = Image.linear_gradient('L').resize((width, overlap_size_y))
|
||||
# radial corner is the left-top corner, made full circle then cut to just the left-top quadrant
|
||||
agradientC = Image.new('L', (256, 256))
|
||||
for y in range(256):
|
||||
for x in range(256):
|
||||
# Find distance to lower right corner (numpy takes arrays)
|
||||
distanceToLR = np.sqrt([(255 - x) ** 2 + (255 - y) ** 2])[0]
|
||||
# Clamp values to max 255
|
||||
if distanceToLR > 255:
|
||||
distanceToLR = 255
|
||||
#Place the pixel as invert of distance
|
||||
agradientC.putpixel((x, y), round(255 - distanceToLR))
|
||||
|
||||
# Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges
|
||||
# Fits for a left-fading gradient on the bottom side and full opacity on the right side.
|
||||
agradientAsymC = Image.new('L', (256, 256))
|
||||
for y in range(256):
|
||||
for x in range(256):
|
||||
value = round(max(0, x-(255-y)) * (255 / max(1,y)))
|
||||
#Clamp values
|
||||
value = max(0, value)
|
||||
value = min(255, value)
|
||||
agradientAsymC.putpixel((x, y), value)
|
||||
|
||||
# Create alpha layers default fully white
|
||||
alphaLayerL = Image.new("L", (width, height), 255)
|
||||
alphaLayerT = Image.new("L", (width, height), 255)
|
||||
alphaLayerLTC = Image.new("L", (width, height), 255)
|
||||
# Paste gradients into alpha layers
|
||||
alphaLayerL.paste(agradientL, (0, 0))
|
||||
alphaLayerT.paste(agradientT, (0, 0))
|
||||
alphaLayerLTC.paste(agradientL, (0, 0))
|
||||
alphaLayerLTC.paste(agradientT, (0, 0))
|
||||
alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0))
|
||||
# make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile
|
||||
# to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space
|
||||
alphaLayerTaC = alphaLayerT.copy()
|
||||
alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
|
||||
alphaLayerLTaC = alphaLayerLTC.copy()
|
||||
alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
|
||||
|
||||
if embiggen_tiles:
|
||||
# Individual unconnected sides
|
||||
alphaLayerR = Image.new("L", (width, height), 255)
|
||||
alphaLayerR.paste(agradientL.rotate(
|
||||
180), (width - overlap_size_x, 0))
|
||||
alphaLayerB = Image.new("L", (width, height), 255)
|
||||
alphaLayerB.paste(agradientT.rotate(
|
||||
180), (0, height - overlap_size_y))
|
||||
alphaLayerTB = Image.new("L", (width, height), 255)
|
||||
alphaLayerTB.paste(agradientT, (0, 0))
|
||||
alphaLayerTB.paste(agradientT.rotate(
|
||||
180), (0, height - overlap_size_y))
|
||||
alphaLayerLR = Image.new("L", (width, height), 255)
|
||||
alphaLayerLR.paste(agradientL, (0, 0))
|
||||
alphaLayerLR.paste(agradientL.rotate(
|
||||
180), (width - overlap_size_x, 0))
|
||||
|
||||
# Sides and corner Layers
|
||||
alphaLayerRBC = Image.new("L", (width, height), 255)
|
||||
alphaLayerRBC.paste(agradientL.rotate(
|
||||
180), (width - overlap_size_x, 0))
|
||||
alphaLayerRBC.paste(agradientT.rotate(
|
||||
180), (0, height - overlap_size_y))
|
||||
alphaLayerRBC.paste(agradientC.rotate(180).resize(
|
||||
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
|
||||
alphaLayerLBC = Image.new("L", (width, height), 255)
|
||||
alphaLayerLBC.paste(agradientL, (0, 0))
|
||||
alphaLayerLBC.paste(agradientT.rotate(
|
||||
180), (0, height - overlap_size_y))
|
||||
alphaLayerLBC.paste(agradientC.rotate(90).resize(
|
||||
(overlap_size_x, overlap_size_y)), (0, height - overlap_size_y))
|
||||
alphaLayerRTC = Image.new("L", (width, height), 255)
|
||||
alphaLayerRTC.paste(agradientL.rotate(
|
||||
180), (width - overlap_size_x, 0))
|
||||
alphaLayerRTC.paste(agradientT, (0, 0))
|
||||
alphaLayerRTC.paste(agradientC.rotate(270).resize(
|
||||
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
|
||||
|
||||
# All but X layers
|
||||
alphaLayerABT = Image.new("L", (width, height), 255)
|
||||
alphaLayerABT.paste(alphaLayerLBC, (0, 0))
|
||||
alphaLayerABT.paste(agradientL.rotate(
|
||||
180), (width - overlap_size_x, 0))
|
||||
alphaLayerABT.paste(agradientC.rotate(180).resize(
|
||||
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
|
||||
alphaLayerABL = Image.new("L", (width, height), 255)
|
||||
alphaLayerABL.paste(alphaLayerRTC, (0, 0))
|
||||
alphaLayerABL.paste(agradientT.rotate(
|
||||
180), (0, height - overlap_size_y))
|
||||
alphaLayerABL.paste(agradientC.rotate(180).resize(
|
||||
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
|
||||
alphaLayerABR = Image.new("L", (width, height), 255)
|
||||
alphaLayerABR.paste(alphaLayerLBC, (0, 0))
|
||||
alphaLayerABR.paste(agradientT, (0, 0))
|
||||
alphaLayerABR.paste(agradientC.resize(
|
||||
(overlap_size_x, overlap_size_y)), (0, 0))
|
||||
alphaLayerABB = Image.new("L", (width, height), 255)
|
||||
alphaLayerABB.paste(alphaLayerRTC, (0, 0))
|
||||
alphaLayerABB.paste(agradientL, (0, 0))
|
||||
alphaLayerABB.paste(agradientC.resize(
|
||||
(overlap_size_x, overlap_size_y)), (0, 0))
|
||||
|
||||
# All-around layer
|
||||
alphaLayerAA = Image.new("L", (width, height), 255)
|
||||
alphaLayerAA.paste(alphaLayerABT, (0, 0))
|
||||
alphaLayerAA.paste(agradientT, (0, 0))
|
||||
alphaLayerAA.paste(agradientC.resize(
|
||||
(overlap_size_x, overlap_size_y)), (0, 0))
|
||||
alphaLayerAA.paste(agradientC.rotate(270).resize(
|
||||
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
|
||||
|
||||
# Clean up temporary gradients
|
||||
del agradientL
|
||||
del agradientT
|
||||
del agradientC
|
||||
|
||||
def make_image():
|
||||
# Make main tiles -------------------------------------------------
|
||||
if embiggen_tiles:
|
||||
print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...')
|
||||
else:
|
||||
print(
|
||||
f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...')
|
||||
|
||||
emb_tile_store = []
|
||||
# Although we could use the same seed for every tile for determinism, at higher strengths this may
|
||||
# produce duplicated structures for each tile and make the tiling effect more obvious
|
||||
# instead track and iterate a local seed we pass to Img2Img
|
||||
seed = self.seed
|
||||
seedintlimit = np.iinfo(np.uint32).max - 1 # only retrieve this one from numpy
|
||||
|
||||
for tile in range(emb_tiles_x * emb_tiles_y):
|
||||
# Don't iterate on first tile
|
||||
if tile != 0:
|
||||
if seed < seedintlimit:
|
||||
seed += 1
|
||||
else:
|
||||
seed = 0
|
||||
|
||||
# Determine if this is a re-run and replace
|
||||
if embiggen_tiles and not tile in embiggen_tiles:
|
||||
continue
|
||||
# Get row and column entries
|
||||
emb_row_i = tile // emb_tiles_x
|
||||
emb_column_i = tile % emb_tiles_x
|
||||
# Determine bounds to cut up the init image
|
||||
# Determine upper-left point
|
||||
if emb_column_i + 1 == emb_tiles_x:
|
||||
left = initsuperwidth - width
|
||||
else:
|
||||
left = round(emb_column_i * (width - overlap_size_x))
|
||||
if emb_row_i + 1 == emb_tiles_y:
|
||||
top = initsuperheight - height
|
||||
else:
|
||||
top = round(emb_row_i * (height - overlap_size_y))
|
||||
right = left + width
|
||||
bottom = top + height
|
||||
|
||||
# Cropped image of above dimension (does not modify the original)
|
||||
newinitimage = initsuperimage.crop((left, top, right, bottom))
|
||||
# DEBUG:
|
||||
# newinitimagepath = init_img[0:-4] + f'_emb_Ti{tile}.png'
|
||||
# newinitimage.save(newinitimagepath)
|
||||
|
||||
if embiggen_tiles:
|
||||
print(
|
||||
f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)')
|
||||
else:
|
||||
print(
|
||||
f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles')
|
||||
|
||||
# create a torch tensor from an Image
|
||||
newinitimage = np.array(
|
||||
newinitimage).astype(np.float32) / 255.0
|
||||
newinitimage = newinitimage[None].transpose(0, 3, 1, 2)
|
||||
newinitimage = torch.from_numpy(newinitimage)
|
||||
newinitimage = 2.0 * newinitimage - 1.0
|
||||
newinitimage = newinitimage.to(self.model.device)
|
||||
|
||||
tile_results = gen_img2img.generate(
|
||||
prompt,
|
||||
iterations = 1,
|
||||
seed = seed,
|
||||
sampler = DDIMSampler(self.model, device=self.model.device),
|
||||
steps = steps,
|
||||
cfg_scale = cfg_scale,
|
||||
conditioning = conditioning,
|
||||
ddim_eta = ddim_eta,
|
||||
image_callback = None, # called only after the final image is generated
|
||||
step_callback = step_callback, # called after each intermediate image is generated
|
||||
width = width,
|
||||
height = height,
|
||||
init_image = newinitimage, # notice that init_image is different from init_img
|
||||
mask_image = None,
|
||||
strength = strength,
|
||||
)
|
||||
|
||||
emb_tile_store.append(tile_results[0][0])
|
||||
# DEBUG (but, also has other uses), worth saving if you want tiles without a transparency overlap to manually composite
|
||||
# emb_tile_store[-1].save(init_img[0:-4] + f'_emb_To{tile}.png')
|
||||
del newinitimage
|
||||
|
||||
# Sanity check we have them all
|
||||
if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles)):
|
||||
outputsuperimage = Image.new(
|
||||
"RGBA", (initsuperwidth, initsuperheight))
|
||||
if embiggen_tiles:
|
||||
outputsuperimage.alpha_composite(
|
||||
initsuperimage.convert('RGBA'), (0, 0))
|
||||
for tile in range(emb_tiles_x * emb_tiles_y):
|
||||
if embiggen_tiles:
|
||||
if tile in embiggen_tiles:
|
||||
intileimage = emb_tile_store.pop(0)
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
intileimage = emb_tile_store[tile]
|
||||
intileimage = intileimage.convert('RGBA')
|
||||
# Get row and column entries
|
||||
emb_row_i = tile // emb_tiles_x
|
||||
emb_column_i = tile % emb_tiles_x
|
||||
if emb_row_i == 0 and emb_column_i == 0 and not embiggen_tiles:
|
||||
left = 0
|
||||
top = 0
|
||||
else:
|
||||
# Determine upper-left point
|
||||
if emb_column_i + 1 == emb_tiles_x:
|
||||
left = initsuperwidth - width
|
||||
else:
|
||||
left = round(emb_column_i *
|
||||
(width - overlap_size_x))
|
||||
if emb_row_i + 1 == emb_tiles_y:
|
||||
top = initsuperheight - height
|
||||
else:
|
||||
top = round(emb_row_i * (height - overlap_size_y))
|
||||
# Handle gradients for various conditions
|
||||
# Handle emb_rerun case
|
||||
if embiggen_tiles:
|
||||
# top of image
|
||||
if emb_row_i == 0:
|
||||
if emb_column_i == 0:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerB)
|
||||
# Otherwise do nothing on this tile
|
||||
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
|
||||
intileimage.putalpha(alphaLayerR)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerRBC)
|
||||
elif emb_column_i == emb_tiles_x - 1:
|
||||
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerL)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerLBC)
|
||||
else:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerL)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerLBC)
|
||||
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
|
||||
intileimage.putalpha(alphaLayerLR)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerABT)
|
||||
# bottom of image
|
||||
elif emb_row_i == emb_tiles_y - 1:
|
||||
if emb_column_i == 0:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
intileimage.putalpha(alphaLayerTaC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerRTC)
|
||||
elif emb_column_i == emb_tiles_x - 1:
|
||||
# No tiles to look ahead to
|
||||
intileimage.putalpha(alphaLayerLTC)
|
||||
else:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
intileimage.putalpha(alphaLayerLTaC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerABB)
|
||||
# vertical middle of image
|
||||
else:
|
||||
if emb_column_i == 0:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerTaC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerTB)
|
||||
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
|
||||
intileimage.putalpha(alphaLayerRTC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerABL)
|
||||
elif emb_column_i == emb_tiles_x - 1:
|
||||
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerLTC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerABR)
|
||||
else:
|
||||
if (tile+1) in embiggen_tiles: # Look-ahead right
|
||||
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
|
||||
intileimage.putalpha(alphaLayerLTaC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerABR)
|
||||
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
|
||||
intileimage.putalpha(alphaLayerABB)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerAA)
|
||||
# Handle normal tiling case (much simpler - since we tile left to right, top to bottom)
|
||||
else:
|
||||
if emb_row_i == 0 and emb_column_i >= 1:
|
||||
intileimage.putalpha(alphaLayerL)
|
||||
elif emb_row_i >= 1 and emb_column_i == 0:
|
||||
if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
|
||||
intileimage.putalpha(alphaLayerT)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerTaC)
|
||||
else:
|
||||
if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
|
||||
intileimage.putalpha(alphaLayerLTC)
|
||||
else:
|
||||
intileimage.putalpha(alphaLayerLTaC)
|
||||
# Layer tile onto final image
|
||||
outputsuperimage.alpha_composite(intileimage, (left, top))
|
||||
else:
|
||||
print('Error: could not find all Embiggen output tiles in memory; something must have gone wrong with img2img generation.')
|
||||
|
||||
# after internal loops and patching up return Embiggen image
|
||||
return outputsuperimage
|
||||
# end of function declaration
|
||||
return make_image
|
||||
@@ -1,97 +0,0 @@
|
||||
'''
|
||||
ldm.invoke.ckpt_generator.img2img descends from ldm.invoke.generator
|
||||
'''
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
import PIL
|
||||
from torch import Tensor
|
||||
from PIL import Image
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.invoke.ckpt_generator.base import CkptGenerator
|
||||
from ldm.models.diffusion.ddim import DDIMSampler
|
||||
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
|
||||
|
||||
class CkptImg2Img(CkptGenerator):
|
||||
def __init__(self, model, precision):
|
||||
super().__init__(model, precision)
|
||||
self.init_latent = None # by get_noise()
|
||||
|
||||
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,init_image,strength,step_callback=None,threshold=0.0,perlin=0.0,**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and the initial image
|
||||
Return value depends on the seed at the time you call it.
|
||||
"""
|
||||
self.perlin = perlin
|
||||
|
||||
sampler.make_schedule(
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
if isinstance(init_image, PIL.Image.Image):
|
||||
init_image = self._image_to_tensor(init_image.convert('RGB'))
|
||||
|
||||
scope = choose_autocast(self.precision)
|
||||
with scope(self.model.device.type):
|
||||
self.init_latent = self.model.get_first_stage_encoding(
|
||||
self.model.encode_first_stage(init_image)
|
||||
) # move to latent space
|
||||
|
||||
t_enc = int(strength * steps)
|
||||
uc, c, extra_conditioning_info = conditioning
|
||||
|
||||
def make_image(x_T):
|
||||
# encode (scaled latent)
|
||||
z_enc = sampler.stochastic_encode(
|
||||
self.init_latent,
|
||||
torch.tensor([t_enc - 1]).to(self.model.device),
|
||||
noise=x_T
|
||||
)
|
||||
|
||||
if self.free_gpu_mem and self.model.model.device != self.model.device:
|
||||
self.model.model.to(self.model.device)
|
||||
|
||||
# decode it
|
||||
samples = sampler.decode(
|
||||
z_enc,
|
||||
c,
|
||||
t_enc,
|
||||
img_callback = step_callback,
|
||||
unconditional_guidance_scale=cfg_scale,
|
||||
unconditional_conditioning=uc,
|
||||
init_latent = self.init_latent, # changes how noising is performed in ksampler
|
||||
extra_conditioning_info = extra_conditioning_info,
|
||||
all_timesteps_count = steps
|
||||
)
|
||||
|
||||
if self.free_gpu_mem:
|
||||
self.model.model.to("cpu")
|
||||
|
||||
return self.sample_to_image(samples)
|
||||
|
||||
return make_image
|
||||
|
||||
def get_noise(self,width,height):
|
||||
device = self.model.device
|
||||
init_latent = self.init_latent
|
||||
assert init_latent is not None,'call to get_noise() when init_latent not set'
|
||||
if device.type == 'mps':
|
||||
x = torch.randn_like(init_latent, device='cpu').to(device)
|
||||
else:
|
||||
x = torch.randn_like(init_latent, device=device)
|
||||
if self.perlin > 0.0:
|
||||
shape = init_latent.shape
|
||||
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
|
||||
return x
|
||||
|
||||
def _image_to_tensor(self, image:Image, normalize:bool=True)->Tensor:
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
if len(image.shape) == 2: # 'L' image, as in a mask
|
||||
image = image[None,None]
|
||||
else: # 'RGB' image
|
||||
image = image[None].transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
if normalize:
|
||||
image = 2.0 * image - 1.0
|
||||
return image.to(self.model.device)
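# A standalone sketch of the PIL-to-tensor conversion done by _image_to_tensor()
# above, CPU-only and with the same [-1, 1] normalization; the function name is
# illustrative.
import numpy as np
import torch
from PIL import Image

def image_to_tensor(image: Image.Image, normalize: bool = True) -> torch.Tensor:
    arr = np.array(image.convert('RGB')).astype(np.float32) / 255.0  # HWC in [0, 1]
    arr = np.ascontiguousarray(arr[None].transpose(0, 3, 1, 2))      # NCHW
    tensor = torch.from_numpy(arr)
    return 2.0 * tensor - 1.0 if normalize else tensor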
|
||||
@@ -1,358 +0,0 @@
|
||||
'''
|
||||
ldm.invoke.ckpt_generator.inpaint descends from ldm.invoke.ckpt_generator
|
||||
'''
|
||||
|
||||
import math
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
import PIL
|
||||
from PIL import Image, ImageFilter, ImageOps, ImageChops
|
||||
from skimage.exposure.histogram_matching import match_histograms
|
||||
from einops import rearrange, repeat
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
|
||||
from ldm.models.diffusion.ddim import DDIMSampler
|
||||
from ldm.models.diffusion.ksampler import KSampler
|
||||
from ldm.invoke.generator.base import downsampling
|
||||
from ldm.util import debug_image
|
||||
from ldm.invoke.patchmatch import PatchMatch
|
||||
from ldm.invoke.globals import Globals
|
||||
|
||||
def infill_methods()->list[str]:
|
||||
methods = list()
|
||||
if PatchMatch.patchmatch_available():
|
||||
methods.append('patchmatch')
|
||||
methods.append('tile')
|
||||
return methods
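# Usage sketch: infill_methods() reports what is available at runtime, listing
# 'patchmatch' first when the optional patchmatch binding can be loaded.
#   infill_methods()  ->  ['patchmatch', 'tile']   or   ['tile']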
|
||||
|
||||
class CkptInpaint(CkptImg2Img):
|
||||
def __init__(self, model, precision):
|
||||
self.init_latent = None
|
||||
self.pil_image = None
|
||||
self.pil_mask = None
|
||||
self.mask_blur_radius = 0
|
||||
self.infill_method = None
|
||||
super().__init__(model, precision)
|
||||
|
||||
# Outpaint support code
|
||||
def get_tile_images(self, image: np.ndarray, width=8, height=8):
|
||||
_nrows, _ncols, depth = image.shape
|
||||
_strides = image.strides
|
||||
|
||||
nrows, _m = divmod(_nrows, height)
|
||||
ncols, _n = divmod(_ncols, width)
|
||||
if _m != 0 or _n != 0:
|
||||
return None
|
||||
|
||||
return np.lib.stride_tricks.as_strided(
|
||||
np.ravel(image),
|
||||
shape=(nrows, ncols, height, width, depth),
|
||||
strides=(height * _strides[0], width * _strides[1], *_strides),
|
||||
writeable=False
|
||||
)
|
||||
|
||||
def infill_patchmatch(self, im: Image.Image) -> Image:
|
||||
if im.mode != 'RGBA':
|
||||
return im
|
||||
|
||||
# Skip patchmatch if patchmatch isn't available
|
||||
if not PatchMatch.patchmatch_available():
|
||||
return im
|
||||
|
||||
# Patchmatch (note, we may want to expose patch_size? Increasing it significantly impacts performance though)
|
||||
im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3)
|
||||
im_patched = Image.fromarray(im_patched_np, mode = 'RGB')
|
||||
return im_patched
|
||||
|
||||
def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image:
|
||||
# Only fill if there's an alpha layer
|
||||
if im.mode != 'RGBA':
|
||||
return im
|
||||
|
||||
a = np.asarray(im, dtype=np.uint8)
|
||||
|
||||
tile_size = (tile_size, tile_size)
|
||||
|
||||
# Get the image as tiles of a specified size
|
||||
tiles = self.get_tile_images(a,*tile_size).copy()
|
||||
|
||||
# Get the mask as tiles
|
||||
tiles_mask = tiles[:,:,:,:,3]
|
||||
|
||||
# Find any mask tiles with any fully transparent pixels (we will be replacing these later)
|
||||
tmask_shape = tiles_mask.shape
|
||||
tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape))
|
||||
n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:])
|
||||
tiles_mask = (tiles_mask > 0)
|
||||
tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1)
|
||||
|
||||
# Get RGB tiles in single array and filter by the mask
|
||||
tshape = tiles.shape
|
||||
tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:]))
|
||||
filtered_tiles = tiles_all[tiles_mask]
|
||||
|
||||
if len(filtered_tiles) == 0:
|
||||
return im
|
||||
|
||||
# Find all invalid tiles and replace with a random valid tile
|
||||
replace_count = (tiles_mask == False).sum()
|
||||
rng = np.random.default_rng(seed = seed)
|
||||
tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:]
|
||||
|
||||
# Convert back to an image
|
||||
tiles_all = tiles_all.reshape(tshape)
|
||||
tiles_all = tiles_all.swapaxes(1,2)
|
||||
st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4]))
|
||||
si = Image.fromarray(st, mode='RGBA')
|
||||
|
||||
return si
|
||||
|
||||
|
||||
def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image:
|
||||
npimg = np.asarray(mask, dtype=np.uint8)
|
||||
|
||||
# Detect any partially transparent regions
|
||||
npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0)))
|
||||
|
||||
# Detect hard edges
|
||||
npedge = cv.Canny(npimg, threshold1=100, threshold2=200)
|
||||
|
||||
# Combine
|
||||
npmask = npgradient + npedge
|
||||
|
||||
# Expand
|
||||
npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
|
||||
|
||||
new_mask = Image.fromarray(npmask)
|
||||
|
||||
if edge_blur > 0:
|
||||
new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur))
|
||||
|
||||
return ImageOps.invert(new_mask)
|
||||
|
||||
|
||||
def seam_paint(self,
|
||||
im: Image.Image,
|
||||
seam_size: int,
|
||||
seam_blur: int,
|
||||
prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,strength,
|
||||
noise,
|
||||
step_callback
|
||||
) -> Image.Image:
|
||||
hard_mask = self.pil_image.split()[-1].copy()
|
||||
mask = self.mask_edge(hard_mask, seam_size, seam_blur)
|
||||
|
||||
make_image = self.get_make_image(
|
||||
prompt,
|
||||
sampler,
|
||||
steps,
|
||||
cfg_scale,
|
||||
ddim_eta,
|
||||
conditioning,
|
||||
init_image = im.copy().convert('RGBA'),
|
||||
mask_image = mask.convert('RGB'), # Code currently requires an RGB mask
|
||||
strength = strength,
|
||||
mask_blur_radius = 0,
|
||||
seam_size = 0,
|
||||
step_callback = step_callback,
|
||||
inpaint_width = im.width,
|
||||
inpaint_height = im.height
|
||||
)
|
||||
|
||||
seam_noise = self.get_noise(im.width, im.height)
|
||||
|
||||
result = make_image(seam_noise)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,init_image,mask_image,strength,
|
||||
mask_blur_radius: int = 8,
|
||||
# Seam settings - when 0, doesn't fill seam
|
||||
seam_size: int = 0,
|
||||
seam_blur: int = 0,
|
||||
seam_strength: float = 0.7,
|
||||
seam_steps: int = 10,
|
||||
tile_size: int = 32,
|
||||
step_callback=None,
|
||||
inpaint_replace=False, enable_image_debugging=False,
|
||||
infill_method = None,
|
||||
inpaint_width=None,
|
||||
inpaint_height=None,
|
||||
**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and
|
||||
the initial image + mask. Return value depends on the seed at
|
||||
the time you call it. kwargs are 'init_latent' and 'strength'
|
||||
"""
|
||||
|
||||
self.enable_image_debugging = enable_image_debugging
|
||||
self.infill_method = infill_method or infill_methods()[0], # The infill method to use
|
||||
|
||||
self.inpaint_width = inpaint_width
|
||||
self.inpaint_height = inpaint_height
|
||||
|
||||
if isinstance(init_image, PIL.Image.Image):
|
||||
self.pil_image = init_image.copy()
|
||||
|
||||
# Do infill
|
||||
if infill_method == 'patchmatch' and PatchMatch.patchmatch_available():
|
||||
init_filled = self.infill_patchmatch(self.pil_image.copy())
|
||||
else: # if infill_method == 'tile': # Only two methods right now, so always use 'tile' if not patchmatch
|
||||
init_filled = self.tile_fill_missing(
|
||||
self.pil_image.copy(),
|
||||
seed = self.seed,
|
||||
tile_size = tile_size
|
||||
)
|
||||
init_filled.paste(init_image, (0,0), init_image.split()[-1])
|
||||
|
||||
# Resize if requested for inpainting
|
||||
if inpaint_width and inpaint_height:
|
||||
init_filled = init_filled.resize((inpaint_width, inpaint_height))
|
||||
|
||||
debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
|
||||
|
||||
# Create init tensor
|
||||
init_image = self._image_to_tensor(init_filled.convert('RGB'))
|
||||
|
||||
if isinstance(mask_image, PIL.Image.Image):
|
||||
self.pil_mask = mask_image.copy()
|
||||
debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging)
|
||||
|
||||
mask_image = ImageChops.multiply(mask_image, self.pil_image.split()[-1].convert('RGB'))
|
||||
self.pil_mask = mask_image
|
||||
|
||||
# Resize if requested for inpainting
|
||||
if inpaint_width and inpaint_height:
|
||||
mask_image = mask_image.resize((inpaint_width, inpaint_height))
|
||||
|
||||
debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging)
|
||||
mask_image = mask_image.resize(
|
||||
(
|
||||
mask_image.width // downsampling,
|
||||
mask_image.height // downsampling
|
||||
),
|
||||
resample=Image.Resampling.NEAREST
|
||||
)
|
||||
mask_image = self._image_to_tensor(mask_image,normalize=False)
|
||||
|
||||
self.mask_blur_radius = mask_blur_radius
|
||||
|
||||
# klms samplers not supported yet, so ignore previous sampler
|
||||
if isinstance(sampler,KSampler):
|
||||
print(
|
||||
f">> Using recommended DDIM sampler for inpainting."
|
||||
)
|
||||
sampler = DDIMSampler(self.model, device=self.model.device)
|
||||
|
||||
sampler.make_schedule(
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
mask_image = mask_image[0][0].unsqueeze(0).repeat(4,1,1).unsqueeze(0)
|
||||
mask_image = repeat(mask_image, '1 ... -> b ...', b=1)
|
||||
|
||||
scope = choose_autocast(self.precision)
|
||||
with scope(self.model.device.type):
|
||||
self.init_latent = self.model.get_first_stage_encoding(
|
||||
self.model.encode_first_stage(init_image)
|
||||
) # move to latent space
|
||||
|
||||
t_enc = int(strength * steps)
|
||||
# todo: support cross-attention control
|
||||
uc, c, _ = conditioning
|
||||
|
||||
print(f">> target t_enc is {t_enc} steps")
|
||||
|
||||
@torch.no_grad()
|
||||
def make_image(x_T):
|
||||
# encode (scaled latent)
|
||||
z_enc = sampler.stochastic_encode(
|
||||
self.init_latent,
|
||||
torch.tensor([t_enc - 1]).to(self.model.device),
|
||||
noise=x_T
|
||||
)
|
||||
|
||||
# to replace masked area with latent noise, weighted by inpaint_replace strength
|
||||
if inpaint_replace > 0.0:
|
||||
print(f'>> inpaint will replace what was under the mask with a strength of {inpaint_replace}')
|
||||
l_noise = self.get_noise(kwargs['width'],kwargs['height'])
|
||||
inverted_mask = 1.0-mask_image # there will be 1s where the mask is
|
||||
masked_region = (1.0-inpaint_replace) * inverted_mask * z_enc + inpaint_replace * inverted_mask * l_noise
|
||||
z_enc = z_enc * mask_image + masked_region
|
||||
|
||||
if self.free_gpu_mem and self.model.model.device != self.model.device:
|
||||
self.model.model.to(self.model.device)
|
||||
|
||||
# decode it
|
||||
samples = sampler.decode(
|
||||
z_enc,
|
||||
c,
|
||||
t_enc,
|
||||
img_callback = step_callback,
|
||||
unconditional_guidance_scale = cfg_scale,
|
||||
unconditional_conditioning = uc,
|
||||
mask = mask_image,
|
||||
init_latent = self.init_latent
|
||||
)
|
||||
|
||||
result = self.sample_to_image(samples)
|
||||
|
||||
# Seam paint if this is our first pass (seam_size set to 0 during seam painting)
|
||||
if seam_size > 0:
|
||||
old_image = self.pil_image or init_image
|
||||
old_mask = self.pil_mask or mask_image
|
||||
|
||||
result = self.seam_paint(
|
||||
result,
|
||||
seam_size,
|
||||
seam_blur,
|
||||
prompt,
|
||||
sampler,
|
||||
seam_steps,
|
||||
cfg_scale,
|
||||
ddim_eta,
|
||||
conditioning,
|
||||
seam_strength,
|
||||
x_T,
|
||||
step_callback)
|
||||
|
||||
# Restore original settings
|
||||
self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,
|
||||
old_image,
|
||||
old_mask,
|
||||
strength,
|
||||
mask_blur_radius, seam_size, seam_blur, seam_strength,
|
||||
seam_steps, tile_size, step_callback,
|
||||
inpaint_replace, enable_image_debugging,
|
||||
inpaint_width = inpaint_width,
|
||||
inpaint_height = inpaint_height,
|
||||
infill_method = infill_method,
|
||||
**kwargs)
|
||||
|
||||
return result
|
||||
|
||||
return make_image
|
||||
|
||||
|
||||
def sample_to_image(self, samples)->Image.Image:
|
||||
gen_result = super().sample_to_image(samples).convert('RGB')
|
||||
debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging)
|
||||
|
||||
# Resize if necessary
|
||||
if self.inpaint_width and self.inpaint_height:
|
||||
gen_result = gen_result.resize(self.pil_image.size)
|
||||
|
||||
if self.pil_image is None or self.pil_mask is None:
|
||||
return gen_result
|
||||
|
||||
corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
|
||||
debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging)
|
||||
|
||||
return corrected_result
|
||||
@@ -1,175 +0,0 @@
|
||||
"""omnibus module to be used with the runwayml 9-channel custom inpainting model"""
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
from einops import repeat
|
||||
from PIL import Image, ImageOps, ImageChops
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.invoke.ckpt_generator.base import downsampling
|
||||
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
|
||||
from ldm.invoke.ckpt_generator.txt2img import CkptTxt2Img
|
||||
|
||||
class CkptOmnibus(CkptImg2Img,CkptTxt2Img):
|
||||
def __init__(self, model, precision):
|
||||
super().__init__(model, precision)
|
||||
self.pil_mask = None
|
||||
self.pil_image = None
|
||||
|
||||
def get_make_image(
|
||||
self,
|
||||
prompt,
|
||||
sampler,
|
||||
steps,
|
||||
cfg_scale,
|
||||
ddim_eta,
|
||||
conditioning,
|
||||
width,
|
||||
height,
|
||||
init_image = None,
|
||||
mask_image = None,
|
||||
strength = None,
|
||||
step_callback=None,
|
||||
threshold=0.0,
|
||||
perlin=0.0,
|
||||
mask_blur_radius: int = 8,
|
||||
**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and the initial image
|
||||
Return value depends on the seed at the time you call it.
|
||||
"""
|
||||
self.perlin = perlin
|
||||
num_samples = 1
|
||||
|
||||
sampler.make_schedule(
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
if isinstance(init_image, Image.Image):
|
||||
self.pil_image = init_image
|
||||
if init_image.mode != 'RGB':
|
||||
init_image = init_image.convert('RGB')
|
||||
init_image = self._image_to_tensor(init_image)
|
||||
|
||||
if isinstance(mask_image, Image.Image):
|
||||
self.pil_mask = mask_image
|
||||
|
||||
mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1])
|
||||
mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False)
|
||||
|
||||
self.mask_blur_radius = mask_blur_radius
|
||||
|
||||
t_enc = steps
|
||||
|
||||
if init_image is not None and mask_image is not None: # inpainting
|
||||
masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero
|
||||
|
||||
elif init_image is not None: # img2img
|
||||
scope = choose_autocast(self.precision)
|
||||
|
||||
with scope(self.model.device.type):
|
||||
self.init_latent = self.model.get_first_stage_encoding(
|
||||
self.model.encode_first_stage(init_image)
|
||||
) # move to latent space
|
||||
|
||||
# create a completely black mask (1s)
|
||||
mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device)
|
||||
# and the masked image is just a copy of the original
|
||||
masked_image = init_image
|
||||
|
||||
else: # txt2img
|
||||
init_image = torch.zeros(1, 3, height, width, device=self.model.device)
|
||||
mask_image = torch.ones(1, 1, height, width, device=self.model.device)
|
||||
masked_image = init_image
|
||||
|
||||
self.init_latent = init_image
|
||||
height = init_image.shape[2]
|
||||
width = init_image.shape[3]
|
||||
model = self.model
|
||||
|
||||
def make_image(x_T):
|
||||
with torch.no_grad():
|
||||
scope = choose_autocast(self.precision)
|
||||
with scope(self.model.device.type):
|
||||
|
||||
batch = self.make_batch_sd(
|
||||
init_image,
|
||||
mask_image,
|
||||
masked_image,
|
||||
prompt=prompt,
|
||||
device=model.device,
|
||||
num_samples=num_samples,
|
||||
)
|
||||
|
||||
c = model.cond_stage_model.encode(batch["txt"])
|
||||
c_cat = list()
|
||||
for ck in model.concat_keys:
|
||||
cc = batch[ck].float()
|
||||
if ck != model.masked_image_key:
|
||||
bchw = [num_samples, 4, height//8, width//8]
|
||||
cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
|
||||
else:
|
||||
cc = model.get_first_stage_encoding(model.encode_first_stage(cc))
|
||||
c_cat.append(cc)
|
||||
c_cat = torch.cat(c_cat, dim=1)
|
||||
|
||||
# cond
|
||||
cond={"c_concat": [c_cat], "c_crossattn": [c]}
|
||||
|
||||
# uncond cond
|
||||
uc_cross = model.get_unconditional_conditioning(num_samples, "")
|
||||
uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
|
||||
shape = [model.channels, height//8, width//8]
|
||||
|
||||
samples, _ = sampler.sample(
|
||||
batch_size = 1,
|
||||
S = steps,
|
||||
x_T = x_T,
|
||||
conditioning = cond,
|
||||
shape = shape,
|
||||
verbose = False,
|
||||
unconditional_guidance_scale = cfg_scale,
|
||||
unconditional_conditioning = uc_full,
|
||||
eta = 1.0,
|
||||
img_callback = step_callback,
|
||||
threshold = threshold,
|
||||
)
|
||||
if self.free_gpu_mem:
|
||||
self.model.model.to("cpu")
|
||||
return self.sample_to_image(samples)
|
||||
|
||||
return make_image
|
||||
|
||||
def make_batch_sd(
|
||||
self,
|
||||
image,
|
||||
mask,
|
||||
masked_image,
|
||||
prompt,
|
||||
device,
|
||||
num_samples=1):
|
||||
batch = {
|
||||
"image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples),
|
||||
"txt": num_samples * [prompt],
|
||||
"mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples),
|
||||
"masked_image": repeat(masked_image.to(device=device), "1 ... -> n ...", n=num_samples),
|
||||
}
|
||||
return batch
|
||||
|
||||
def get_noise(self, width:int, height:int):
|
||||
if self.init_latent is not None:
|
||||
height = self.init_latent.shape[2]
|
||||
width = self.init_latent.shape[3]
|
||||
return CkptTxt2Img.get_noise(self,width,height)
|
||||
|
||||
|
||||
def sample_to_image(self, samples)->Image.Image:
|
||||
gen_result = super().sample_to_image(samples).convert('RGB')
|
||||
|
||||
if self.pil_image is None or self.pil_mask is None:
|
||||
return gen_result
|
||||
if self.pil_image.size != self.pil_mask.size:
|
||||
return gen_result
|
||||
|
||||
corrected_result = super(CkptImg2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
|
||||
|
||||
return corrected_result
|
||||
@@ -1,90 +0,0 @@
|
||||
'''
|
||||
ldm.invoke.ckpt_generator.txt2img inherits from ldm.invoke.ckpt_generator
|
||||
'''
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
from ldm.invoke.ckpt_generator.base import CkptGenerator
|
||||
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
|
||||
import gc
|
||||
|
||||
|
||||
class CkptTxt2Img(CkptGenerator):
|
||||
def __init__(self, model, precision):
|
||||
super().__init__(model, precision)
|
||||
|
||||
@torch.no_grad()
|
||||
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,
|
||||
attention_maps_callback=None,
|
||||
**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and the initial image
|
||||
Return value depends on the seed at the time you call it
|
||||
kwargs are 'width' and 'height'
|
||||
"""
|
||||
self.perlin = perlin
|
||||
uc, c, extra_conditioning_info = conditioning
|
||||
|
||||
@torch.no_grad()
|
||||
def make_image(x_T):
|
||||
shape = [
|
||||
self.latent_channels,
|
||||
height // self.downsampling_factor,
|
||||
width // self.downsampling_factor,
|
||||
]
|
||||
|
||||
if self.free_gpu_mem and self.model.model.device != self.model.device:
|
||||
self.model.model.to(self.model.device)
|
||||
|
||||
sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
|
||||
|
||||
samples, _ = sampler.sample(
|
||||
batch_size = 1,
|
||||
S = steps,
|
||||
x_T = x_T,
|
||||
conditioning = c,
|
||||
shape = shape,
|
||||
verbose = False,
|
||||
unconditional_guidance_scale = cfg_scale,
|
||||
unconditional_conditioning = uc,
|
||||
extra_conditioning_info = extra_conditioning_info,
|
||||
eta = ddim_eta,
|
||||
img_callback = step_callback,
|
||||
threshold = threshold,
|
||||
attention_maps_callback = attention_maps_callback,
|
||||
)
|
||||
|
||||
if self.free_gpu_mem:
|
||||
self.model.model.to('cpu')
|
||||
self.model.cond_stage_model.device = 'cpu'
|
||||
self.model.cond_stage_model.to('cpu')
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
return self.sample_to_image(samples)
|
||||
|
||||
return make_image
|
||||
|
||||
|
||||
# returns a tensor filled with random numbers from a normal distribution
|
||||
def get_noise(self,width,height):
|
||||
device = self.model.device
|
||||
if self.use_mps_noise or device.type == 'mps':
|
||||
x = torch.randn([1,
|
||||
self.latent_channels,
|
||||
height // self.downsampling_factor,
|
||||
width // self.downsampling_factor],
|
||||
dtype=self.torch_dtype(),
|
||||
device='cpu').to(device)
|
||||
else:
|
||||
x = torch.randn([1,
|
||||
self.latent_channels,
|
||||
height // self.downsampling_factor,
|
||||
width // self.downsampling_factor],
|
||||
dtype=self.torch_dtype(),
|
||||
device=device)
|
||||
if self.perlin > 0.0:
|
||||
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
|
||||
return x
|
||||
|
||||
@@ -1,182 +0,0 @@
|
||||
'''
|
||||
ldm.invoke.ckpt_generator.txt2img inherits from ldm.invoke.ckpt_generator
|
||||
'''
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
import math
|
||||
import gc
|
||||
from ldm.invoke.ckpt_generator.base import CkptGenerator
|
||||
from ldm.invoke.ckpt_generator.omnibus import CkptOmnibus
|
||||
from ldm.models.diffusion.ddim import DDIMSampler
|
||||
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
|
||||
from PIL import Image
|
||||
|
||||
class CkptTxt2Img2Img(CkptGenerator):
|
||||
def __init__(self, model, precision):
|
||||
super().__init__(model, precision)
|
||||
self.init_latent = None # for get_noise()
|
||||
|
||||
@torch.no_grad()
|
||||
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,width,height,strength,step_callback=None,**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and the initial image
|
||||
Return value depends on the seed at the time you call it
|
||||
kwargs are 'width' and 'height'
|
||||
"""
|
||||
uc, c, extra_conditioning_info = conditioning
|
||||
scale_dim = min(width, height)
|
||||
scale = 512 / scale_dim
|
||||
|
||||
init_width = math.ceil(scale * width / 64) * 64
|
||||
init_height = math.ceil(scale * height / 64) * 64
|
||||
|
||||
@torch.no_grad()
|
||||
def make_image(x_T):
|
||||
|
||||
shape = [
|
||||
self.latent_channels,
|
||||
init_height // self.downsampling_factor,
|
||||
init_width // self.downsampling_factor,
|
||||
]
|
||||
|
||||
sampler.make_schedule(
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
#x = self.get_noise(init_width, init_height)
|
||||
x = x_T
|
||||
|
||||
if self.free_gpu_mem and self.model.model.device != self.model.device:
|
||||
self.model.model.to(self.model.device)
|
||||
|
||||
samples, _ = sampler.sample(
|
||||
batch_size = 1,
|
||||
S = steps,
|
||||
x_T = x,
|
||||
conditioning = c,
|
||||
shape = shape,
|
||||
verbose = False,
|
||||
unconditional_guidance_scale = cfg_scale,
|
||||
unconditional_conditioning = uc,
|
||||
eta = ddim_eta,
|
||||
img_callback = step_callback,
|
||||
extra_conditioning_info = extra_conditioning_info
|
||||
)
|
||||
|
||||
print(
|
||||
f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling"
|
||||
)
|
||||
|
||||
# resizing
|
||||
samples = torch.nn.functional.interpolate(
|
||||
samples,
|
||||
size=(height // self.downsampling_factor, width // self.downsampling_factor),
|
||||
mode="bilinear"
|
||||
)
|
||||
|
||||
t_enc = int(strength * steps)
|
||||
ddim_sampler = DDIMSampler(self.model, device=self.model.device)
|
||||
ddim_sampler.make_schedule(
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
z_enc = ddim_sampler.stochastic_encode(
|
||||
samples,
|
||||
torch.tensor([t_enc-1]).to(self.model.device),
|
||||
noise=self.get_noise(width,height,False)
|
||||
)
|
||||
|
||||
# decode it
|
||||
samples = ddim_sampler.decode(
|
||||
z_enc,
|
||||
c,
|
||||
t_enc,
|
||||
img_callback = step_callback,
|
||||
unconditional_guidance_scale=cfg_scale,
|
||||
unconditional_conditioning=uc,
|
||||
extra_conditioning_info=extra_conditioning_info,
|
||||
all_timesteps_count=steps
|
||||
)
|
||||
|
||||
if self.free_gpu_mem:
|
||||
self.model.model.to('cpu')
|
||||
self.model.cond_stage_model.device = 'cpu'
|
||||
self.model.cond_stage_model.to('cpu')
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
return self.sample_to_image(samples)
|
||||
|
||||
# in the case of the inpainting model being loaded, the trick of
|
||||
# providing an interpolated latent doesn't work, so we transiently
|
||||
# create a 512x512 PIL image, upscale it, and run the inpainting
|
||||
# over it in img2img mode. Because the inpainting model is so conservative
|
||||
# it doesn't change the image (much)
|
||||
def inpaint_make_image(x_T):
|
||||
omnibus = CkptOmnibus(self.model,self.precision)
|
||||
result = omnibus.generate(
|
||||
prompt,
|
||||
sampler=sampler,
|
||||
width=init_width,
|
||||
height=init_height,
|
||||
step_callback=step_callback,
|
||||
steps = steps,
|
||||
cfg_scale = cfg_scale,
|
||||
ddim_eta = ddim_eta,
|
||||
conditioning = conditioning,
|
||||
**kwargs
|
||||
)
|
||||
assert result is not None and len(result)>0,'** txt2img failed **'
|
||||
image = result[0][0]
|
||||
interpolated_image = image.resize((width,height),resample=Image.Resampling.LANCZOS)
|
||||
print(kwargs.pop('init_image',None))
|
||||
result = omnibus.generate(
|
||||
prompt,
|
||||
sampler=sampler,
|
||||
init_image=interpolated_image,
|
||||
width=width,
|
||||
height=height,
|
||||
seed=result[0][1],
|
||||
step_callback=step_callback,
|
||||
steps = steps,
|
||||
cfg_scale = cfg_scale,
|
||||
ddim_eta = ddim_eta,
|
||||
conditioning = conditioning,
|
||||
**kwargs
|
||||
)
|
||||
return result[0][0]
|
||||
|
||||
if sampler.uses_inpainting_model():
|
||||
return inpaint_make_image
|
||||
else:
|
||||
return make_image
|
||||
|
||||
# returns a tensor filled with random numbers from a normal distribution
|
||||
def get_noise(self,width,height,scale = True):
|
||||
# print(f"Get noise: {width}x{height}")
|
||||
if scale:
|
||||
trained_square = 512 * 512
|
||||
actual_square = width * height
|
||||
scale = math.sqrt(trained_square / actual_square)
|
||||
scaled_width = math.ceil(scale * width / 64) * 64
|
||||
scaled_height = math.ceil(scale * height / 64) * 64
|
||||
else:
|
||||
scaled_width = width
|
||||
scaled_height = height
|
||||
|
||||
device = self.model.device
|
||||
if self.use_mps_noise or device.type == 'mps':
|
||||
return torch.randn([1,
|
||||
self.latent_channels,
|
||||
scaled_height // self.downsampling_factor,
|
||||
scaled_width // self.downsampling_factor],
|
||||
device='cpu').to(device)
|
||||
else:
|
||||
return torch.randn([1,
|
||||
self.latent_channels,
|
||||
scaled_height // self.downsampling_factor,
|
||||
scaled_width // self.downsampling_factor],
|
||||
device=device)
|
||||
|
||||
File diff suppressed because it is too large
@@ -24,13 +24,15 @@ class HuggingFaceConceptsLibrary(object):
        self.concepts_loaded = dict()
        self.triggers = dict()      # concept name to trigger phrase
        self.concept_names = dict() # trigger phrase to concept name
        self.match_trigger = re.compile('(<[\w\- >]+>)')          # trigger is slightly less restrictive than HF concept name
        self.match_concept = re.compile('<([\w\-]+)>')            # HF concept name can only contain A-Za-z0-9_-
        self.match_trigger = re.compile('(<[a-zA-Z0-9_\- >]+>)')  # trigger is slightly less restrictive than HF concept name
        self.match_concept = re.compile('<([a-zA-Z0-9_\-]+)>')    # HF concept name can only contain A-Za-z0-9_-

    def list_concepts(self)->list:
    def list_concepts(self, minimum_likes: int=0)->list:
        '''
        Return a list of all the concepts by name, without the 'sd-concepts-library' part.
        Also adds local concepts in invokeai/embeddings folder.
        If minimum_likes is provided, then only concepts that have received at least that
        many "likes" will be returned.
        '''
        local_concepts_now = self.get_local_concepts(os.path.join(self.root, 'embeddings'))
        local_concepts_to_add = set(local_concepts_now).difference(set(self.local_concepts))
@@ -44,7 +46,7 @@ class HuggingFaceConceptsLibrary(object):
        else:
            try:
                models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/'))
                self.concept_list = [a.id.split('/')[1] for a in models]
                self.concept_list = [a.id.split('/')[1] for a in models if a.likes>=minimum_likes]
                # when init, add all in dir. when not init, add only concepts added between init and now
                self.concept_list.extend(list(local_concepts_to_add))
            except Exception as e:
@@ -181,7 +183,7 @@ class HuggingFaceConceptsLibrary(object):

        print(f'>> Downloading {repo_id}...',end='')
        try:
            for file in ('README.md','learned_embeds.bin','token_identifier.txt','type_of_concept.txt'):
            for file in ('learned_embeds.bin','token_identifier.txt'):
                url = hf_hub_url(repo_id, file)
                request.urlretrieve(url, os.path.join(dest,file), reporthook=tally_download_size)
        except ul_error.HTTPError as e:
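A minimal sketch (not part of the diff) of how the tightened trigger and concept patterns behave, using only Python's standard re module; the example trigger phrases are hypothetical:

import re

match_trigger = re.compile(r'(<[a-zA-Z0-9_\- >]+>)')   # whole trigger phrase, may contain spaces
match_concept = re.compile(r'<([a-zA-Z0-9_\-]+)>')     # bare HF concept name: A-Za-z0-9_- only

prompt = "a portrait in the style of <ganyu-character> and <low-poly world>"
print(match_trigger.findall(prompt))   # ['<ganyu-character>', '<low-poly world>']
print(match_concept.findall(prompt))   # ['ganyu-character']  (the spaced trigger is not a valid concept name)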
@@ -12,7 +8,8 @@ from typing import Union, Optional, Any
from transformers import CLIPTokenizer

from compel import Compel
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
    Conjunction
from .devices import torch_dtype
from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from ldm.invoke.globals import Globals
@@ -55,13 +56,27 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
    # get rid of any newline characters
    prompt_string = prompt_string.replace("\n", " ")
    positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)

    legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
    positive_prompt: FlattenedPrompt|Blend
    positive_conjunction: Conjunction
    if legacy_blend is not None:
        positive_prompt = legacy_blend
        positive_conjunction = legacy_blend
    else:
        positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
    negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string)
        positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
    positive_prompt = positive_conjunction.prompts[0]

    should_use_lora_manager = True
    lora_weights = positive_conjunction.lora_weights
    lora_conditions = None
    if model.peft_manager:
        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
        if not should_use_lora_manager:
            model.peft_manager.set_loras(lora_weights)
    if model.lora_manager and should_use_lora_manager:
        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)

    negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
    negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]

    if log_tokens or getattr(Globals, "log_tokenization", False):
        log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
@@ -73,7 +88,8 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l

    ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
                                                         cross_attention_control_args=options.get(
                                                             'cross_attention_control', None))
                                                             'cross_attention_control', None),
                                                         lora_conditions=lora_conditions)
    return uc, c, ec

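A minimal sketch (not part of the diff, and assuming the withLora(...) prompt syntax this branch parses; the LoRA name and weight are illustrative only) of how the Conjunction returned by Compel.parse_prompt_string is expected to be unpacked, mirroring the names used in the hunk above:

from compel import Compel
from compel.prompt_parser import Conjunction, FlattenedPrompt

conjunction: Conjunction = Compel.parse_prompt_string("a watercolor fox withLora(watercolor-style,0.8)")
flattened: FlattenedPrompt = conjunction.prompts[0]   # what get_uc_and_c_and_ec feeds to the tokenizer
lora_weights = conjunction.lora_weights               # what the peft/lora managers receive
print(flattened, lora_weights)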
@@ -81,12 +97,14 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
        Union[FlattenedPrompt, Blend], FlattenedPrompt):
    positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
    legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
    positive_prompt: FlattenedPrompt|Blend
    positive_conjunction: Conjunction
    if legacy_blend is not None:
        positive_prompt = legacy_blend
        positive_conjunction = legacy_blend
    else:
        positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
    negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string)
        positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
    positive_prompt = positive_conjunction.prompts[0]
    negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
    negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]

    return positive_prompt, negative_prompt

@@ -203,18 +221,26 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
            print(f'{discarded}\x1b[0m')


def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
    weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
    if len(weighted_subprompts) <= 1:
        return None
    strings = [x[0] for x in weighted_subprompts]
    weights = [x[1] for x in weighted_subprompts]

    pp = PromptParser()
    parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
    flattened_prompts = []
    weights = []
    loras = []
    for i, x in enumerate(parsed_conjunctions):
        if len(x.prompts)>0:
            flattened_prompts.append(x.prompts[0])
            weights.append(weighted_subprompts[i][1])
        if len(x.lora_weights)>0:
            loras.extend(x.lora_weights)

    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
                       lora_weights = loras)


def split_weighted_subprompts(text, skip_normalize=False)->list:

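For orientation, a brief sketch (not from the diff) of what the reworked helper is expected to return for a legacy weighted prompt, assuming the old text:weight blend syntax; the prompt is illustrative:

conj = try_parse_legacy_blend("oil painting of a lighthouse:1.0 stormy sea:0.5")
if conj is not None:
    blend = conj.prompts[0]      # a Blend over the two flattened sub-prompts
    print(blend.weights)         # normalized unless skip_normalize was passed
    print(conj.lora_weights)     # any withLora(...) terms found inside the sub-prompts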
@@ -655,6 +655,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
        "models",
        "configs",
        "embeddings",
        "loras",
        "text-inversion-output",
        "text-inversion-training-data",
    ):

@@ -4,18 +4,19 @@ pip install <path_to_git_source>.
'''
import os
import platform
import psutil
import requests
from rich import box, print
from rich.console import Console, Group, group
from rich.console import Console, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.style import Style
from rich.syntax import Syntax
from rich.text import Text

from ldm.invoke import __version__

INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG="https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH="https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"

OS = platform.uname().system
@@ -30,6 +31,19 @@ else:
def get_versions()->dict:
    return requests.get(url=INVOKE_AI_REL).json()

def invokeai_is_running()->bool:
    for p in psutil.process_iter():
        try:
            cmdline = p.cmdline()
            matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
            if matches:
                print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
                return True
        except psutil.AccessDenied:
            continue
    return False


def welcome(versions: dict):

    @group()
@@ -41,7 +55,8 @@ def welcome(versions: dict):
        yield '[bold yellow]Options:'
        yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the tag or branch name you wish to update'''
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to'''

    console.rule()
    print(
@@ -59,20 +74,33 @@ def welcome(versions: dict):

def main():
    versions = get_versions()
    if invokeai_is_running():
        print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
        return

    welcome(versions)

    tag = None
    choice = Prompt.ask('Choice:',choices=['1','2','3'],default='1')
    branch = None
    release = None
    choice = Prompt.ask('Choice:',choices=['1','2','3','4'],default='1')

    if choice=='1':
        tag = versions[0]['tag_name']
        release = versions[0]['tag_name']
    elif choice=='2':
        tag = 'main'
        release = 'main'
    elif choice=='3':
        tag = Prompt.ask('Enter an InvokeAI tag or branch name')
        tag = Prompt.ask('Enter an InvokeAI tag name')
    elif choice=='4':
        branch = Prompt.ask('Enter an InvokeAI branch name')

    print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]')
    cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517 --upgrade'
    print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
    if release:
        cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
    elif tag:
        cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
    else:
        cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
    print('')
    print('')
    if os.system(cmd)==0:
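A short illustration (not part of the diff) of the pip commands the three branches above would build; the release, tag, and branch names are hypothetical:

release, tag, branch = 'v2.3.3', 'v2.3.3-rc1', 'main'   # illustrative values only
print(f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade')
# pip install https://github.com/invoke-ai/InvokeAI/archive/v2.3.3.zip --use-pep517 --upgrade
print(f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade')
# pip install https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.3.3-rc1.zip --use-pep517 --upgrade
print(f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade')
# pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip --use-pep517 --upgrade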
@@ -29,7 +29,13 @@ Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"

# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
Dataset_path = None
for path in configs.__path__:
    file = Path(path, "INITIAL_MODELS.yaml")
    if file.exists():
        Dataset_path = file
        break
assert Dataset_path, f"Could not find the file INITIAL_MODELS.yaml in {configs.__path__}"

# initial models omegaconf
Datasets = None

@@ -32,7 +32,8 @@ def expand_prompts(
|
||||
template_file: Path,
|
||||
run_invoke: bool = False,
|
||||
invoke_model: str = None,
|
||||
invoke_outdir: Path = None,
|
||||
invoke_outdir: str = None,
|
||||
invoke_root: str = None,
|
||||
processes_per_gpu: int = 1,
|
||||
):
|
||||
"""
|
||||
@@ -61,6 +62,8 @@ def expand_prompts(
|
||||
invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
|
||||
if invoke_model:
|
||||
invokeai_args.extend(("--model", invoke_model))
|
||||
if invoke_root:
|
||||
invokeai_args.extend(("--root", invoke_root))
|
||||
if invoke_outdir:
|
||||
outdir = os.path.expanduser(invoke_outdir)
|
||||
invokeai_args.extend(("--outdir", outdir))
|
||||
@@ -79,6 +82,11 @@ def expand_prompts(
|
||||
)
|
||||
import ldm.invoke.CLI
|
||||
|
||||
print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
|
||||
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
|
||||
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
|
||||
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
|
||||
|
||||
parent_conn, child_conn = Pipe()
|
||||
children = set()
|
||||
for i in range(processes_to_launch):
|
||||
@@ -99,8 +107,9 @@ def expand_prompts(
|
||||
sequence = 0
|
||||
for command in commands:
|
||||
sequence += 1
|
||||
parent_conn.send(
|
||||
command + f' --fnformat="dp.{sequence:04}.{{prompt}}.png"'
|
||||
format = _get_fn_format(outdir, sequence)
|
||||
parent_conn.send_bytes(
|
||||
(command + f' --fnformat="{format}"').encode('utf-8')
|
||||
)
|
||||
parent_conn.close()
|
||||
else:
|
||||
@@ -110,7 +119,27 @@ def expand_prompts(
        for p in children:
            p.terminate()

def _dummy_cli_main():
    counter = 0
    while line := sys.stdin.readline():
        print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
        counter += 1
        time.sleep(1)

def _get_fn_format(directory:str, sequence:int)->str:
    """
    Get a filename that doesn't exceed filename length restrictions
    on the current platform.
    """
    try:
        max_length = os.pathconf(directory,'PC_NAME_MAX')
    except:
        max_length = 255
    prefix = f'dp.{sequence:04}.'
    suffix = '.png'
    max_length -= len(prefix)+len(suffix)
    return f'{prefix}{{prompt:0.{max_length}}}{suffix}'

class MessageToStdin(object):
    def __init__(self, connection: Connection):
        self.connection = connection
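A quick sketch (not from the diff) of the filename template _get_fn_format produces; the directory is hypothetical, and the 255-character fallback applies when os.pathconf has no answer:

# with the usual 255-character limit: 255 - len('dp.0001.') - len('.png') = 243
print(_get_fn_format('/tmp/outputs', 1))
# dp.0001.{prompt:0.243}.png  -- later filled in with the prompt text, which the .243 precision truncates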
@@ -119,7 +148,7 @@ class MessageToStdin(object):
|
||||
def readline(self) -> str:
|
||||
try:
|
||||
if len(self.linebuffer) == 0:
|
||||
message = self.connection.recv()
|
||||
message = self.connection.recv_bytes().decode('utf-8')
|
||||
self.linebuffer = message.split("\n")
|
||||
result = self.linebuffer.pop(0)
|
||||
return result
|
||||
@@ -165,9 +194,9 @@ def _run_invoke(
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
|
||||
sys.argv = args
|
||||
sys.stdin = MessageToStdin(conn_in)
|
||||
sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
|
||||
with open(logfile, "w") as stderr, redirect_stderr(stderr):
|
||||
entry_point()
|
||||
# sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
|
||||
# with open(logfile, "w") as stderr, redirect_stderr(stderr):
|
||||
entry_point()
|
||||
|
||||
|
||||
def _filter_output(stream: TextIOBase):
|
||||
@@ -224,6 +253,10 @@ def main():
|
||||
default=1,
|
||||
help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--root_dir',
|
||||
default=None,
|
||||
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
|
||||
opt = parser.parse_args()
|
||||
|
||||
if opt.example:
|
||||
@@ -247,6 +280,7 @@ def main():
|
||||
run_invoke=opt.invoke,
|
||||
invoke_model=opt.model,
|
||||
invoke_outdir=opt.outdir,
|
||||
invoke_root=opt.root,
|
||||
processes_per_gpu=opt.processes_per_gpu,
|
||||
)
|
||||
|
||||
|
||||
@@ -29,6 +29,8 @@ from typing_extensions import ParamSpec
from ldm.invoke.globals import Globals
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ldm.modules.textual_inversion_manager import TextualInversionManager
from ldm.modules.lora_manager import LoraManager
from ldm.modules.peft_manager import PeftManager
from ..devices import normalize_device, CPU_DEVICE
from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
@@ -289,11 +291,14 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.lora_manager = LoraManager(self)
        self.peft_manager = PeftManager()
        self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward, is_running_diffusers=True)
        use_full_precision = (precision == 'float32' or precision == 'autocast')
        self.textual_inversion_manager = TextualInversionManager(tokenizer=self.tokenizer,
                                                                 text_encoder=self.text_encoder,
                                                                 full_precision=use_full_precision)

        # InvokeAI's interface for text embeddings and whatnot
        self.embeddings_provider = EmbeddingsProvider(
            tokenizer=self.tokenizer,
@@ -255,8 +255,8 @@ class Inpaint(Img2Img):
        pipeline.scheduler = sampler

        # todo: support cross-attention control
        uc, c, _ = conditioning
        conditioning_data = (ConditioningData(uc, c, cfg_scale)
        uc, c, extra_conditioning_info = conditioning
        conditioning_data = (ConditioningData(uc, c, cfg_scale, extra_conditioning_info)
                             .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))

@@ -23,6 +23,7 @@ Globals = Namespace()
Globals.initfile = 'invokeai.init'
Globals.models_file = 'models.yaml'
Globals.models_dir = 'models'
Globals.lora_models_dir = 'loras'
Globals.config_dir = 'configs'
Globals.autoscan_dir = 'weights'
Globals.converted_ckpts_dir = 'converted_ckpts'
@@ -61,7 +62,7 @@ Globals.sequential_guidance = False
Globals.full_precision = False

# whether we should convert ckpt files into diffusers models on the fly
Globals.ckpt_convert = False
Globals.ckpt_convert = True

# logging tokenization everywhere
Globals.log_tokenization = False
@@ -75,6 +76,11 @@ def global_config_dir()->Path:
def global_models_dir()->Path:
    return Path(Globals.root, Globals.models_dir)

def global_lora_models_dir()->Path:
    return Path(Globals.lora_models_dir) \
        if Path(Globals.lora_models_dir).is_absolute() \
        else Path(Globals.root, Globals.lora_models_dir)

def global_autoscan_dir()->Path:
    return Path(Globals.root, Globals.autoscan_dir)

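A small sketch (not in the diff) of how the new lora directory helper resolves relative versus absolute settings; the paths are examples only:

Globals.root = '/home/user/invokeai'            # example root
Globals.lora_models_dir = 'loras'               # relative: resolved under the root
print(global_lora_models_dir())                 # /home/user/invokeai/loras

Globals.lora_models_dir = '/data/shared/loras'  # absolute: used as-is
print(global_lora_models_dir())                 # /data/shared/loras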
ldm/invoke/invokeai_metadata.py (new executable file, 29 lines)
@@ -0,0 +1,29 @@
#!/usr/bin/env python

import sys
import json
from ldm.invoke.pngwriter import retrieve_metadata

def main():
    if len(sys.argv) < 2:
        print("Usage: invokeai_metadata.py <file1.png> <file2.png> <file3.png>...")
        print("This script opens up the indicated invoke.py-generated PNG file(s) and prints out their metadata.")
        exit(-1)

    filenames = sys.argv[1:]
    for f in filenames:
        try:
            metadata = retrieve_metadata(f)
            print(f'{f}:\n', json.dumps(metadata['sd-metadata'], indent=4))
        except FileNotFoundError:
            sys.stderr.write(f'{f} not found\n')
            continue
        except PermissionError:
            sys.stderr.write(f'{f} could not be opened due to inadequate permissions\n')
            continue

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
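For orientation, a hedged example (not part of the diff) of driving the same helper from Python rather than the command line; the file name is hypothetical and the listed metadata fields are only what such files typically carry:

import json
from ldm.invoke.pngwriter import retrieve_metadata

meta = retrieve_metadata('outputs/000001.1234567890.png')   # hypothetical invoke.py-generated PNG
print(json.dumps(meta['sd-metadata'], indent=4))             # prompt, seed, steps, and similar generation settings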
@@ -19,7 +19,7 @@ import warnings
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from shutil import move, rmtree
|
||||
from typing import Any, Optional, Union, Callable
|
||||
from typing import Any, Callable, Optional, Union, List
|
||||
|
||||
import safetensors
|
||||
import safetensors.torch
|
||||
@@ -35,12 +35,7 @@ from picklescan.scanner import scan_file_path
|
||||
from ldm.invoke.devices import CPU_DEVICE
|
||||
from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
|
||||
from ldm.invoke.globals import Globals, global_cache_dir
|
||||
from ldm.util import (
|
||||
ask_user,
|
||||
download_with_resume,
|
||||
instantiate_from_config,
|
||||
url_attachment_name,
|
||||
)
|
||||
from ldm.util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name
|
||||
|
||||
|
||||
class SDLegacyType(Enum):
|
||||
@@ -51,12 +46,7 @@ class SDLegacyType(Enum):
|
||||
V2_v = 5
|
||||
UNKNOWN = 99
|
||||
|
||||
|
||||
DEFAULT_MAX_MODELS = 2
|
||||
VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
|
||||
"vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse",
|
||||
}
|
||||
|
||||
|
||||
class ModelManager(object):
|
||||
def __init__(
|
||||
@@ -113,11 +103,7 @@ class ModelManager(object):
|
||||
requested_model = self.models[model_name]["model"]
|
||||
print(f">> Retrieving model {model_name} from system RAM cache")
|
||||
self.models[model_name]["model"] = self._model_from_cpu(requested_model)
|
||||
width = self.models[model_name]["width"]
|
||||
height = self.models[model_name]["height"]
|
||||
hash = self.models[model_name]["hash"]
|
||||
|
||||
else: # we're about to load a new model, so potentially offload the least recently used one
|
||||
else:
|
||||
requested_model, width, height, hash = self._load_model(model_name)
|
||||
self.models[model_name] = {
|
||||
"model": requested_model,
|
||||
@@ -128,13 +114,8 @@ class ModelManager(object):
|
||||
|
||||
self.current_model = model_name
|
||||
self._push_newest_model(model_name)
|
||||
return {
|
||||
"model": requested_model,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"hash": hash,
|
||||
}
|
||||
|
||||
return self.models[model_name]
|
||||
|
||||
def default_model(self) -> str | None:
|
||||
"""
|
||||
Returns the name of the default model, or None
|
||||
@@ -171,19 +152,6 @@ class ModelManager(object):
|
||||
"""
|
||||
return list(self.config.keys())
|
||||
|
||||
def is_legacy(self, model_name: str) -> bool:
|
||||
"""
|
||||
Return true if this is a legacy (.ckpt) model
|
||||
"""
|
||||
# if we are converting legacy files automatically, then
|
||||
# there are no legacy ckpts!
|
||||
if Globals.ckpt_convert:
|
||||
return False
|
||||
info = self.model_info(model_name)
|
||||
if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
|
||||
return True
|
||||
return False
|
||||
|
||||
def list_models(self) -> dict:
|
||||
"""
|
||||
Return a dict of models in the format:
|
||||
@@ -384,104 +352,55 @@ class ModelManager(object):
|
||||
if not os.path.isabs(weights):
|
||||
weights = os.path.normpath(os.path.join(Globals.root, weights))
|
||||
|
||||
# if converting automatically to diffusers, then we do the conversion and return
|
||||
# a diffusers pipeline
|
||||
if Globals.ckpt_convert:
|
||||
print(
|
||||
f">> Converting legacy checkpoint {model_name} into a diffusers model..."
|
||||
)
|
||||
from ldm.invoke.ckpt_to_diffuser import (
|
||||
load_pipeline_from_original_stable_diffusion_ckpt,
|
||||
)
|
||||
if matching_config := self._scan_for_matching_file(Path(weights),suffixes=['.yaml']):
|
||||
print(f' | Using external config file {matching_config}')
|
||||
config = matching_config
|
||||
|
||||
self.offload_model(self.current_model)
|
||||
if vae_config := self._choose_diffusers_vae(model_name):
|
||||
vae = self._load_vae(vae_config)
|
||||
if self._has_cuda():
|
||||
torch.cuda.empty_cache()
|
||||
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
|
||||
checkpoint_path=weights,
|
||||
original_config_file=config,
|
||||
vae=vae,
|
||||
return_generator_pipeline=True,
|
||||
precision=torch.float16
|
||||
if self.precision == "float16"
|
||||
else torch.float32,
|
||||
)
|
||||
if self.sequential_offload:
|
||||
pipeline.enable_offload_submodels(self.device)
|
||||
else:
|
||||
pipeline.to(self.device)
|
||||
|
||||
return (
|
||||
pipeline,
|
||||
width,
|
||||
height,
|
||||
"NOHASH",
|
||||
)
|
||||
|
||||
# for usage statistics
|
||||
if self._has_cuda():
|
||||
torch.cuda.reset_peak_memory_stats()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
# this does the work
|
||||
if not os.path.isabs(config):
|
||||
config = os.path.join(Globals.root, config)
|
||||
omega_config = OmegaConf.load(config)
|
||||
with open(weights, "rb") as f:
|
||||
weight_bytes = f.read()
|
||||
model_hash = self._cached_sha256(weights, weight_bytes)
|
||||
sd = None
|
||||
|
||||
if weights.endswith(".ckpt"):
|
||||
self.scan_model(model_name, weights)
|
||||
sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
|
||||
else:
|
||||
sd = safetensors.torch.load(weight_bytes)
|
||||
|
||||
del weight_bytes
|
||||
# merged models from auto11 merge board are flat for some reason
|
||||
if "state_dict" in sd:
|
||||
sd = sd["state_dict"]
|
||||
|
||||
print(" | Forcing garbage collection prior to loading new model")
|
||||
gc.collect()
|
||||
model = instantiate_from_config(omega_config.model)
|
||||
model.load_state_dict(sd, strict=False)
|
||||
|
||||
if self.precision == "float16":
|
||||
print(" | Using faster float16 precision")
|
||||
model = model.to(torch.float16)
|
||||
else:
|
||||
print(" | Using more accurate float32 precision")
|
||||
|
||||
# look and load a matching vae file. Code borrowed from AUTOMATIC1111 modules/sd_models.py
|
||||
# get the path to the custom vae, if any
|
||||
vae_path = None
|
||||
# first we use whatever is in the config file
|
||||
if vae:
|
||||
if not os.path.isabs(vae):
|
||||
vae = os.path.normpath(os.path.join(Globals.root, vae))
|
||||
if os.path.exists(vae):
|
||||
print(f" | Loading VAE weights from: {vae}")
|
||||
if vae.endswith((".ckpt",".pt")):
|
||||
self.scan_model(vae,vae)
|
||||
vae_ckpt = torch.load(vae, map_location="cpu")
|
||||
else:
|
||||
vae_ckpt = safetensors.torch.load_file(vae)
|
||||
vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
|
||||
model.first_stage_model.load_state_dict(vae_dict, strict=False)
|
||||
else:
|
||||
print(f" | VAE file {vae} not found. Skipping.")
|
||||
path = Path(vae if os.path.isabs(vae) else os.path.normpath(os.path.join(Globals.root, vae)))
|
||||
if path.exists():
|
||||
vae_path = path
|
||||
# then we look for a file with the same basename
|
||||
vae_path = vae_path or self._scan_for_matching_file(Path(weights))
|
||||
|
||||
# Do the conversion and return a diffusers pipeline
|
||||
print(
|
||||
f">> Converting legacy checkpoint {model_name} into a diffusers model..."
|
||||
)
|
||||
from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt
|
||||
|
||||
model.to(self.device)
|
||||
# model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
|
||||
model.cond_stage_model.device = self.device
|
||||
# try:
|
||||
# if self.list_models()[self.current_model]['status'] == 'active':
|
||||
# self.offload_model(self.current_model)
|
||||
# except Exception:
|
||||
# pass
|
||||
|
||||
model.eval()
|
||||
if self._has_cuda():
|
||||
torch.cuda.empty_cache()
|
||||
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
|
||||
checkpoint_path=weights,
|
||||
original_config_file=config,
|
||||
vae_path=vae_path,
|
||||
return_generator_pipeline=True,
|
||||
precision=torch.float16
|
||||
if self.precision == "float16"
|
||||
else torch.float32,
|
||||
)
|
||||
if self.sequential_offload:
|
||||
pipeline.enable_offload_submodels(self.device)
|
||||
else:
|
||||
pipeline.to(self.device)
|
||||
|
||||
return (
|
||||
pipeline,
|
||||
width,
|
||||
height,
|
||||
"NOHASH",
|
||||
)
|
||||
|
||||
for module in model.modules():
|
||||
if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
|
||||
module._orig_padding_mode = module.padding_mode
|
||||
return model, width, height, model_hash
|
||||
|
||||
def _load_diffusers_model(self, mconfig):
|
||||
name_or_path = self.model_name_or_path(mconfig)
|
||||
@@ -547,6 +466,17 @@ class ModelManager(object):
|
||||
|
||||
return pipeline, width, height, model_hash
|
||||
|
||||
def is_v2_config(self, config: Path) -> bool:
|
||||
if not os.path.isabs(config):
|
||||
config = os.path.join(Globals.root, config)
|
||||
try:
|
||||
mconfig = OmegaConf.load(config)
|
||||
return (
|
||||
mconfig["model"]["params"]["unet_config"]["params"]["context_dim"] > 768
|
||||
)
|
||||
except:
|
||||
return False
|
||||
|
||||
def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path:
|
||||
if isinstance(model_name, DictConfig) or isinstance(model_name, dict):
|
||||
mconfig = model_name
|
||||
@@ -724,7 +654,7 @@ class ModelManager(object):
|
||||
SDLegacyType.V2_v (V2 using 'v_prediction' prediction type)
|
||||
SDLegacyType.UNKNOWN
|
||||
"""
|
||||
global_step = checkpoint.get('global_step')
|
||||
global_step = checkpoint.get("global_step")
|
||||
state_dict = checkpoint.get("state_dict") or checkpoint
|
||||
|
||||
try:
|
||||
@@ -751,14 +681,13 @@ class ModelManager(object):
|
||||
return SDLegacyType.UNKNOWN
|
||||
|
||||
def heuristic_import(
|
||||
self,
|
||||
path_url_or_repo: str,
|
||||
convert: bool = False,
|
||||
model_name: str = None,
|
||||
description: str = None,
|
||||
model_config_file: Path = None,
|
||||
commit_to_conf: Path = None,
|
||||
config_file_callback: Callable[[Path],Path] = None,
|
||||
self,
|
||||
path_url_or_repo: str,
|
||||
model_name: str = None,
|
||||
description: str = None,
|
||||
model_config_file: Path = None,
|
||||
commit_to_conf: Path = None,
|
||||
config_file_callback: Callable[[Path], Path] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Accept a string which could be:
|
||||
@@ -775,9 +704,6 @@ class ModelManager(object):
|
||||
The model_name and/or description can be provided. If not, they will
|
||||
be generated automatically.
|
||||
|
||||
If convert is true, legacy models will be converted to diffusers
|
||||
before importing.
|
||||
|
||||
If commit_to_conf is provided, the newly loaded model will be written
|
||||
to the `models.yaml` file at the indicated path. Otherwise, the changes
|
||||
will only remain in memory.
|
||||
@@ -813,7 +739,6 @@ class ModelManager(object):
|
||||
print(f" | {thing} appears to be a diffusers file on disk")
|
||||
model_name = self.import_diffuser_model(
|
||||
thing,
|
||||
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
|
||||
model_name=model_name,
|
||||
description=description,
|
||||
commit_to_conf=commit_to_conf,
|
||||
@@ -833,10 +758,9 @@ class ModelManager(object):
|
||||
Path(thing).rglob("*.safetensors")
|
||||
):
|
||||
if model_name := self.heuristic_import(
|
||||
str(m),
|
||||
convert,
|
||||
commit_to_conf=commit_to_conf,
|
||||
config_file_callback=config_file_callback,
|
||||
str(m),
|
||||
commit_to_conf=commit_to_conf,
|
||||
config_file_callback=config_file_callback,
|
||||
):
|
||||
print(f" >> {model_name} successfully imported")
|
||||
return model_name
|
||||
@@ -864,104 +788,92 @@ class ModelManager(object):
|
||||
|
||||
# another round of heuristics to guess the correct config file.
|
||||
checkpoint = None
|
||||
if model_path.suffix.endswith((".ckpt",".pt")):
|
||||
self.scan_model(model_path,model_path)
|
||||
if model_path.suffix.endswith((".ckpt", ".pt")):
|
||||
self.scan_model(model_path, model_path)
|
||||
checkpoint = torch.load(model_path)
|
||||
else:
|
||||
checkpoint = safetensors.torch.load_file(model_path)
|
||||
# additional probing needed if no config file provided
|
||||
if model_config_file is None:
|
||||
model_type = self.probe_model_type(checkpoint)
|
||||
if model_type == SDLegacyType.V1:
|
||||
print(" | SD-v1 model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V1_INPAINT:
|
||||
print(" | SD-v1 inpainting model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V2_v:
|
||||
print(
|
||||
" | SD-v2-v model detected"
|
||||
)
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V2_e:
|
||||
print(
|
||||
" | SD-v2-e model detected"
|
||||
)
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V2:
|
||||
print(
|
||||
f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
|
||||
)
|
||||
# Is there a like-named .yaml file in the same directory as the
|
||||
# weights file? If so, we treat this as our model
|
||||
if model_path.with_suffix(".yaml").exists():
|
||||
model_config_file = model_path.with_suffix(".yaml")
|
||||
print(f" | Using config file {model_config_file.name}")
|
||||
else:
|
||||
print(
|
||||
f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
|
||||
)
|
||||
model_type = self.probe_model_type(checkpoint)
|
||||
if model_type == SDLegacyType.V1:
|
||||
print(" | SD-v1 model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V1_INPAINT:
|
||||
print(" | SD-v1 inpainting model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root,
|
||||
"configs/stable-diffusion/v1-inpainting-inference.yaml",
|
||||
)
|
||||
elif model_type == SDLegacyType.V2_v:
|
||||
print(" | SD-v2-v model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V2_e:
|
||||
print(" | SD-v2-e model detected")
|
||||
model_config_file = Path(
|
||||
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
|
||||
)
|
||||
elif model_type == SDLegacyType.V2:
|
||||
print(
|
||||
f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide the configuration file type or path."
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide the configuration file type or path."
|
||||
)
|
||||
|
||||
if not model_config_file and config_file_callback:
|
||||
model_config_file = config_file_callback(model_path)
|
||||
if not model_config_file:
|
||||
return
|
||||
|
||||
if model_config_file.name.startswith('v2'):
|
||||
convert = True
|
||||
print(
|
||||
" | This SD-v2 model will be converted to diffusers format for use"
|
||||
)
|
||||
|
||||
if convert:
|
||||
diffuser_path = Path(
|
||||
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
|
||||
)
|
||||
model_name = self.convert_and_import(
|
||||
model_path,
|
||||
diffusers_path=diffuser_path,
|
||||
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
|
||||
model_name=model_name,
|
||||
model_description=description,
|
||||
original_config_file=model_config_file,
|
||||
commit_to_conf=commit_to_conf,
|
||||
scan_needed=False,
|
||||
)
|
||||
# in the event that this file was downloaded automatically prior to conversion
|
||||
# we do not keep the original .ckpt/.safetensors around
|
||||
if is_temporary:
|
||||
model_path.unlink(missing_ok=True)
|
||||
else:
|
||||
model_name = self.import_ckpt_model(
|
||||
model_path,
|
||||
config=model_config_file,
|
||||
model_name=model_name,
|
||||
model_description=description,
|
||||
vae=str(
|
||||
Path(
|
||||
Globals.root,
|
||||
"models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
|
||||
)
|
||||
),
|
||||
commit_to_conf=commit_to_conf,
|
||||
)
|
||||
if (vae_path := self._scan_for_matching_file(model_path)):
|
||||
print(f" | Using VAE file {vae_path.name}")
|
||||
|
||||
diffuser_path = Path(
|
||||
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
|
||||
)
|
||||
vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
|
||||
model_name = self.convert_and_import(
|
||||
model_path,
|
||||
diffusers_path=diffuser_path,
|
||||
vae=vae,
|
||||
vae_path=vae_path,
|
||||
model_name=model_name,
|
||||
model_description=description,
|
||||
original_config_file=model_config_file,
|
||||
commit_to_conf=commit_to_conf,
|
||||
scan_needed=False,
|
||||
)
|
||||
# in the event that this file was downloaded automatically prior to conversion
|
||||
# we do not keep the original .ckpt/.safetensors around
|
||||
if is_temporary:
|
||||
model_path.unlink(missing_ok=True)
|
||||
if commit_to_conf:
|
||||
self.commit(commit_to_conf)
|
||||
return model_name
|
||||
|
||||
def convert_and_import(
|
||||
self,
|
||||
ckpt_path: Path,
|
||||
diffusers_path: Path,
|
||||
model_name=None,
|
||||
model_description=None,
|
||||
vae=None,
|
||||
original_config_file: Path = None,
|
||||
commit_to_conf: Path = None,
|
||||
scan_needed: bool=True,
|
||||
self,
|
||||
ckpt_path: Path,
|
||||
diffusers_path: Path,
|
||||
model_name=None,
|
||||
model_description=None,
|
||||
vae: dict = None,
|
||||
vae_path: Path = None,
|
||||
original_config_file: Path = None,
|
||||
commit_to_conf: Path = None,
|
||||
scan_needed: bool = True,
|
||||
) -> str:
|
||||
"""
|
||||
Convert a legacy ckpt weights file to diffuser model and import
|
||||
@@ -975,7 +887,7 @@ class ModelManager(object):
|
||||
|
||||
new_config = None
|
||||
|
||||
from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
|
||||
from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffusers
|
||||
|
||||
if diffusers_path.exists():
|
||||
print(
|
||||
@@ -989,13 +901,17 @@ class ModelManager(object):
|
||||
try:
|
||||
# By passing the specified VAE to the conversion function, the autoencoder
|
||||
# will be built into the model rather than tacked on afterward via the config file
|
||||
vae_model = self._load_vae(vae) if vae else None
|
||||
convert_ckpt_to_diffuser(
|
||||
vae_model=None
|
||||
if vae:
|
||||
vae_model=self._load_vae(vae)
|
||||
vae_path=None
|
||||
convert_ckpt_to_diffusers(
|
||||
ckpt_path,
|
||||
diffusers_path,
|
||||
extract_ema=True,
|
||||
original_config_file=original_config_file,
|
||||
vae=vae_model,
|
||||
vae_path=vae_path,
|
||||
scan_needed=scan_needed,
|
||||
)
|
||||
print(
|
||||
@@ -1042,36 +958,6 @@ class ModelManager(object):
|
||||
|
||||
return search_folder, found_models
|
||||
|
||||
def _choose_diffusers_vae(
|
||||
self, model_name: str, vae: str = None
|
||||
) -> Union[dict, str]:
|
||||
# In the event that the original entry is using a custom ckpt VAE, we try to
|
||||
# map that VAE onto a diffuser VAE using a hard-coded dictionary.
|
||||
# I would prefer to do this differently: We load the ckpt model into memory, swap the
|
||||
# VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped
|
||||
# VAE is built into the model. However, when I tried this I got obscure key errors.
|
||||
if vae:
|
||||
return vae
|
||||
if model_name in self.config and (
|
||||
vae_ckpt_path := self.model_info(model_name).get("vae", None)
|
||||
):
|
||||
vae_basename = Path(vae_ckpt_path).stem
|
||||
diffusers_vae = None
|
||||
if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None):
|
||||
print(
|
||||
f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version"
|
||||
)
|
||||
vae = {"repo_id": diffusers_vae}
|
||||
else:
|
||||
print(
|
||||
f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown'
|
||||
)
|
||||
print(
|
||||
'** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config'
|
||||
)
|
||||
vae = {"repo_id": "stabilityai/sd-vae-ft-mse"}
|
||||
return vae
|
||||
|
||||
def _make_cache_room(self) -> None:
|
||||
num_loaded_models = len(self.models)
|
||||
if num_loaded_models >= self.max_loaded_models:
|
||||
@@ -1134,14 +1020,14 @@ class ModelManager(object):
|
||||
legacy_locations = [
|
||||
Path(
|
||||
models_dir,
|
||||
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker"
|
||||
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
|
||||
),
|
||||
Path("bert-base-uncased/models--bert-base-uncased"),
|
||||
Path(
|
||||
"openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
|
||||
),
|
||||
]
|
||||
legacy_locations.extend(list(global_cache_dir("diffusers").glob('*')))
|
||||
legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
|
||||
legacy_layout = False
|
||||
for model in legacy_locations:
|
||||
legacy_layout = legacy_layout or model.exists()
|
||||
@@ -1185,7 +1071,7 @@ class ModelManager(object):
|
||||
source.unlink()
|
||||
else:
|
||||
move(source, dest)
|
||||
|
||||
|
||||
# now clean up by removing any empty directories
|
||||
empty = [
|
||||
root
|
||||
@@ -1333,6 +1219,22 @@ class ModelManager(object):
|
||||
f.write(hash)
|
||||
return hash
|
||||
|
||||
@classmethod
def _scan_for_matching_file(
self,model_path: Path,
suffixes: List[str]=['.vae.pt','.vae.ckpt','.vae.safetensors']
)->Path:
"""
Find a file with same basename as the indicated model, but with one
of the suffixes passed.
"""
# look for a custom vae
vae_path = None
for suffix in suffixes:
if model_path.with_suffix(suffix).exists():
vae_path = model_path.with_suffix(suffix)
return vae_path

def _load_vae(self, vae_config) -> AutoencoderKL:
vae_args = {}
try:
@@ -1344,7 +1246,7 @@ class ModelManager(object):
using_fp16 = self.precision == "float16"

vae_args.update(
cache_dir=global_cache_dir("hug"),
cache_dir=global_cache_dir("hub"),
local_files_only=not Globals.internet_available,
)
@@ -11,9 +11,11 @@ seeds:
import os
import re
import atexit
from typing import List
from ldm.invoke.args import Args
from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
from ldm.invoke.globals import Globals
from ldm.modules.lora_manager import LoraManager

# ---------------readline utilities---------------------
try:
@@ -136,6 +138,9 @@ class Completer(object):
elif re.search('<[\w-]*$',buffer):
self.matches= self._concept_completions(text,state)

elif re.search('withLora\(?[a-zA-Z0-9._-]*$',buffer):
self.matches= self._lora_completions(text,state)

# looking for a model
elif re.match('^'+'|'.join(MODEL_COMMANDS),buffer):
self.matches= self._model_completions(text, state)
@@ -298,6 +303,15 @@ class Completer(object):
matches.sort()
return matches

def _lora_completions(self, text, state)->List[str]:
loras: dict = LoraManager.list_loras()
lora_names = [f'withLora({x},1)' for x in loras.keys()]
matches = list()
for lora in lora_names:
if lora.startswith(text):
matches.append(lora)
return sorted(matches)

def _model_completions(self, text, state, ckpt_only=False):
m = re.search('(!switch\s+)(\w*)',text)
if m:
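The new _lora_completions hook above turns every LoRA file discovered on disk into a withLora(name,1) completion candidate and keeps only those matching the typed prefix. The snippet below is a self-contained sketch of that matching, with a plain list standing in for the LoraManager.list_loras() result.

from typing import List

def lora_completions(text: str, available_loras: List[str]) -> List[str]:
    """Mimic _lora_completions: wrap each discovered LoRA name in the
    withLora(name,weight) prompt syntax and keep prefix matches."""
    candidates = [f"withLora({name},1)" for name in available_loras]
    return sorted(c for c in candidates if c.startswith(text))

# Typing "withLora(ar" against loras ["arcane_style", "pixel_art"]
# completes to ["withLora(arcane_style,1)"].
print(lora_completions("withLora(ar", ["arcane_style", "pixel_art"]))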
@@ -289,10 +289,10 @@ class InvokeAICrossAttentionMixin:
|
||||
|
||||
|
||||
|
||||
def restore_default_cross_attention(model, is_running_diffusers: bool, restore_attention_processor: Optional[AttnProcessor]=None):
|
||||
def restore_default_cross_attention(model, is_running_diffusers: bool, processors_to_restore: Optional[AttnProcessor]=None):
|
||||
if is_running_diffusers:
|
||||
unet = model
|
||||
unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor())
|
||||
unet.set_attn_processor(processors_to_restore or CrossAttnProcessor())
|
||||
else:
|
||||
remove_attention_function(model)
|
||||
|
||||
@@ -334,11 +334,9 @@ def override_cross_attention(model, context: Context, is_running_diffusers = Fal
|
||||
default_slice_size = 4
|
||||
slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
|
||||
unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
|
||||
return old_attn_processors
|
||||
else:
|
||||
context.register_cross_attention_modules(model)
|
||||
inject_attention_function(model, context)
|
||||
return None
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ class DDIMSampler(Sampler):
|
||||
all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
|
||||
|
||||
if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
|
||||
self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count)
|
||||
self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
|
||||
else:
|
||||
self.invokeai_diffuser.restore_default_cross_attention()
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ from functools import partial
|
||||
from tqdm import tqdm
|
||||
from torchvision.utils import make_grid
|
||||
from pytorch_lightning.utilities.distributed import rank_zero_only
|
||||
from omegaconf import ListConfig
|
||||
from omegaconf import ListConfig, OmegaConf
|
||||
import urllib
|
||||
|
||||
from ldm.modules.textual_inversion_manager import TextualInversionManager
|
||||
@@ -609,6 +609,7 @@ class DDPM(pl.LightningModule):
|
||||
opt = torch.optim.AdamW(params, lr=lr)
|
||||
return opt
|
||||
|
||||
|
||||
|
||||
class LatentDiffusion(DDPM):
|
||||
"""main class"""
|
||||
@@ -617,7 +618,7 @@ class LatentDiffusion(DDPM):
|
||||
self,
|
||||
first_stage_config,
|
||||
cond_stage_config,
|
||||
personalization_config,
|
||||
personalization_config=None,
|
||||
num_timesteps_cond=None,
|
||||
cond_stage_key='image',
|
||||
cond_stage_trainable=False,
|
||||
@@ -675,7 +676,8 @@ class LatentDiffusion(DDPM):
|
||||
self.model.train = disabled_train
|
||||
for param in self.model.parameters():
|
||||
param.requires_grad = False
|
||||
|
||||
|
||||
personalization_config = personalization_config or self._fallback_personalization_config()
|
||||
self.embedding_manager = self.instantiate_embedding_manager(
|
||||
personalization_config, self.cond_stage_model
|
||||
)
|
||||
@@ -2150,6 +2152,25 @@ class LatentDiffusion(DDPM):
|
||||
|
||||
self.emb_ckpt_counter += 500
|
||||
|
||||
@classmethod
|
||||
def _fallback_personalization_config(self)->dict:
|
||||
"""
|
||||
This protects us against custom legacy config files that
|
||||
don't contain the personalization_config section.
|
||||
"""
|
||||
return OmegaConf.create(
|
||||
dict(
|
||||
target='ldm.modules.embedding_manager.EmbeddingManager',
|
||||
params=dict(
|
||||
placeholder_strings=list('*'),
|
||||
initializer_words=list('sculpture'),
|
||||
per_image_tokens=False,
|
||||
num_vectors_per_token=1,
|
||||
progressive_words=False,
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class DiffusionWrapper(pl.LightningModule):
|
||||
def __init__(self, diff_model_config, conditioning_key):
|
||||
|
||||
@@ -43,7 +43,7 @@ class CFGDenoiser(nn.Module):
|
||||
extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
|
||||
|
||||
if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
|
||||
self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = t_enc)
|
||||
self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = t_enc)
|
||||
else:
|
||||
self.invokeai_diffuser.restore_default_cross_attention()
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ class PLMSSampler(Sampler):
|
||||
all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
|
||||
|
||||
if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
|
||||
self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count)
|
||||
self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
|
||||
else:
|
||||
self.invokeai_diffuser.restore_default_cross_attention()
|
||||
|
||||
|
||||
@@ -9,17 +9,28 @@ from diffusers.models.cross_attention import AttnProcessor
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
from ldm.invoke.globals import Globals
|
||||
from ldm.models.diffusion.cross_attention_control import Arguments, \
|
||||
restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \
|
||||
CrossAttentionType, SwapCrossAttnContext
|
||||
from ldm.models.diffusion.cross_attention_control import (
|
||||
Arguments,
|
||||
restore_default_cross_attention,
|
||||
override_cross_attention,
|
||||
Context,
|
||||
get_cross_attention_modules,
|
||||
CrossAttentionType,
|
||||
SwapCrossAttnContext,
|
||||
)
|
||||
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
|
||||
from ldm.modules.lora_manager import LoraCondition
|
||||
|
||||
ModelForwardCallback: TypeAlias = Union[
|
||||
# x, t, conditioning, Optional[cross-attention kwargs]
|
||||
Callable[[torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]], torch.Tensor],
|
||||
Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]
|
||||
Callable[
|
||||
[torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]],
|
||||
torch.Tensor,
|
||||
],
|
||||
Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
|
||||
]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class PostprocessingSettings:
|
||||
threshold: float
|
||||
@@ -29,31 +40,39 @@ class PostprocessingSettings:
|
||||
|
||||
|
||||
class InvokeAIDiffuserComponent:
|
||||
'''
|
||||
"""
|
||||
The aim of this component is to provide a single place for code that can be applied identically to
|
||||
all InvokeAI diffusion procedures.
|
||||
|
||||
At the moment it includes the following features:
|
||||
* Cross attention control ("prompt2prompt")
|
||||
* Hybrid conditioning (used for inpainting)
|
||||
'''
|
||||
* "LoRA" and "PEFT" augmentions to the unet's attention weights
|
||||
"""
|
||||
|
||||
debug_thresholding = False
|
||||
sequential_guidance = False
|
||||
|
||||
@dataclass
|
||||
class ExtraConditioningInfo:
|
||||
|
||||
tokens_count_including_eos_bos: int
|
||||
cross_attention_control_args: Optional[Arguments] = None
|
||||
lora_conditions: Optional[list[LoraCondition]] = None
|
||||
|
||||
@property
|
||||
def wants_cross_attention_control(self):
|
||||
return self.cross_attention_control_args is not None
|
||||
|
||||
@property
|
||||
def has_lora_conditions(self):
|
||||
return self.lora_conditions is not None
|
||||
|
||||
def __init__(self, model, model_forward_callback: ModelForwardCallback,
|
||||
is_running_diffusers: bool=False,
|
||||
):
|
||||
def __init__(
|
||||
self,
|
||||
model,
|
||||
model_forward_callback: ModelForwardCallback,
|
||||
is_running_diffusers: bool = False,
|
||||
):
|
||||
"""
|
||||
:param model: the unet model to pass through to cross attention control
|
||||
:param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
|
||||
@@ -66,42 +85,64 @@ class InvokeAIDiffuserComponent:
|
||||
self.sequential_guidance = Globals.sequential_guidance
|
||||
|
||||
@contextmanager
|
||||
def custom_attention_context(self,
|
||||
extra_conditioning_info: Optional[ExtraConditioningInfo],
|
||||
step_count: int):
|
||||
do_swap = extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control
|
||||
def custom_attention_context(
|
||||
self, extra_conditioning_info: Optional[ExtraConditioningInfo], step_count: int
|
||||
):
|
||||
old_attn_processor = None
|
||||
if do_swap:
|
||||
old_attn_processor = self.override_cross_attention(extra_conditioning_info,
|
||||
step_count=step_count)
|
||||
if extra_conditioning_info and (
|
||||
extra_conditioning_info.wants_cross_attention_control
|
||||
| extra_conditioning_info.has_lora_conditions
|
||||
):
|
||||
old_attn_processor = self.override_attention_processors(
|
||||
extra_conditioning_info, step_count=step_count
|
||||
)
|
||||
|
||||
try:
|
||||
yield None
|
||||
finally:
|
||||
if old_attn_processor is not None:
|
||||
self.restore_default_cross_attention(old_attn_processor)
|
||||
if extra_conditioning_info and extra_conditioning_info.has_lora_conditions:
|
||||
for lora_condition in extra_conditioning_info.lora_conditions:
|
||||
lora_condition.unload()
|
||||
# TODO resuscitate attention map saving
|
||||
#self.remove_attention_map_saving()
|
||||
# self.remove_attention_map_saving()
|
||||
|
||||
def override_cross_attention(self, conditioning: ExtraConditioningInfo, step_count: int) -> Dict[str, AttnProcessor]:
|
||||
def override_attention_processors(
|
||||
self, conditioning: ExtraConditioningInfo, step_count: int
|
||||
) -> Dict[str, AttnProcessor]:
|
||||
"""
|
||||
setup cross attention .swap control. for diffusers this replaces the attention processor, so
|
||||
the previous attention processor is returned so that the caller can restore it later.
|
||||
"""
|
||||
self.conditioning = conditioning
|
||||
self.cross_attention_control_context = Context(
|
||||
arguments=self.conditioning.cross_attention_control_args,
|
||||
step_count=step_count
|
||||
)
|
||||
return override_cross_attention(self.model,
|
||||
self.cross_attention_control_context,
|
||||
is_running_diffusers=self.is_running_diffusers)
|
||||
old_attn_processors = self.model.attn_processors
|
||||
|
||||
def restore_default_cross_attention(self, restore_attention_processor: Optional['AttnProcessor']=None):
|
||||
self.conditioning = None
|
||||
# Load lora conditions into the model
|
||||
if conditioning.has_lora_conditions:
|
||||
for condition in conditioning.lora_conditions:
|
||||
condition(self.model)
|
||||
|
||||
if conditioning.wants_cross_attention_control:
|
||||
self.cross_attention_control_context = Context(
|
||||
arguments=conditioning.cross_attention_control_args,
|
||||
step_count=step_count,
|
||||
)
|
||||
override_cross_attention(
|
||||
self.model,
|
||||
self.cross_attention_control_context,
|
||||
is_running_diffusers=self.is_running_diffusers,
|
||||
)
|
||||
return old_attn_processors
|
||||
|
||||
def restore_default_cross_attention(
|
||||
self, processors_to_restore: Optional[dict[str, "AttnProcessor"]] = None
|
||||
):
|
||||
self.cross_attention_control_context = None
|
||||
restore_default_cross_attention(self.model,
|
||||
is_running_diffusers=self.is_running_diffusers,
|
||||
restore_attention_processor=restore_attention_processor)
|
||||
restore_default_cross_attention(
|
||||
self.model,
|
||||
is_running_diffusers=self.is_running_diffusers,
|
||||
processors_to_restore=processors_to_restore,
|
||||
)
|
||||
|
||||
def setup_attention_map_saving(self, saver: AttentionMapSaver):
|
||||
def callback(slice, dim, offset, slice_size, key):
|
||||
@@ -110,26 +151,40 @@ class InvokeAIDiffuserComponent:
|
||||
return
|
||||
saver.add_attention_maps(slice, key)
|
||||
|
||||
tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS)
|
||||
tokens_cross_attention_modules = get_cross_attention_modules(
|
||||
self.model, CrossAttentionType.TOKENS
|
||||
)
|
||||
for identifier, module in tokens_cross_attention_modules:
|
||||
key = ('down' if identifier.startswith('down') else
|
||||
'up' if identifier.startswith('up') else
|
||||
'mid')
|
||||
key = (
|
||||
"down"
|
||||
if identifier.startswith("down")
|
||||
else "up"
|
||||
if identifier.startswith("up")
|
||||
else "mid"
|
||||
)
|
||||
module.set_attention_slice_calculated_callback(
|
||||
lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key))
|
||||
lambda slice, dim, offset, slice_size, key=key: callback(
|
||||
slice, dim, offset, slice_size, key
|
||||
)
|
||||
)
|
||||
|
||||
def remove_attention_map_saving(self):
|
||||
tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS)
|
||||
tokens_cross_attention_modules = get_cross_attention_modules(
|
||||
self.model, CrossAttentionType.TOKENS
|
||||
)
|
||||
for _, module in tokens_cross_attention_modules:
|
||||
module.set_attention_slice_calculated_callback(None)
|
||||
|
||||
def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor,
|
||||
unconditioning: Union[torch.Tensor,dict],
|
||||
conditioning: Union[torch.Tensor,dict],
|
||||
unconditional_guidance_scale: float,
|
||||
step_index: Optional[int]=None,
|
||||
total_step_count: Optional[int]=None,
|
||||
):
|
||||
def do_diffusion_step(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma: torch.Tensor,
|
||||
unconditioning: Union[torch.Tensor, dict],
|
||||
conditioning: Union[torch.Tensor, dict],
|
||||
unconditional_guidance_scale: float,
|
||||
step_index: Optional[int] = None,
|
||||
total_step_count: Optional[int] = None,
|
||||
):
|
||||
"""
|
||||
:param x: current latents
|
||||
:param sigma: aka t, passed to the internal model to control how much denoising will occur
|
||||
@@ -140,33 +195,55 @@ class InvokeAIDiffuserComponent:
|
||||
:return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning.
|
||||
"""
|
||||
|
||||
|
||||
cross_attention_control_types_to_do = []
|
||||
context: Context = self.cross_attention_control_context
|
||||
if self.cross_attention_control_context is not None:
|
||||
percent_through = self.calculate_percent_through(sigma, step_index, total_step_count)
|
||||
cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through)
|
||||
percent_through = self.calculate_percent_through(
|
||||
sigma, step_index, total_step_count
|
||||
)
|
||||
cross_attention_control_types_to_do = (
|
||||
context.get_active_cross_attention_control_types_for_step(
|
||||
percent_through
|
||||
)
|
||||
)
|
||||
|
||||
wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0)
|
||||
wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
|
||||
wants_hybrid_conditioning = isinstance(conditioning, dict)
|
||||
|
||||
if wants_hybrid_conditioning:
|
||||
unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(x, sigma, unconditioning,
|
||||
conditioning)
|
||||
unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(
|
||||
x, sigma, unconditioning, conditioning
|
||||
)
|
||||
elif wants_cross_attention_control:
|
||||
unconditioned_next_x, conditioned_next_x = self._apply_cross_attention_controlled_conditioning(x, sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do)
|
||||
(
|
||||
unconditioned_next_x,
|
||||
conditioned_next_x,
|
||||
) = self._apply_cross_attention_controlled_conditioning(
|
||||
x,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
)
|
||||
elif self.sequential_guidance:
|
||||
unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially(
|
||||
x, sigma, unconditioning, conditioning)
|
||||
(
|
||||
unconditioned_next_x,
|
||||
conditioned_next_x,
|
||||
) = self._apply_standard_conditioning_sequentially(
|
||||
x, sigma, unconditioning, conditioning
|
||||
)
|
||||
|
||||
else:
|
||||
unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning(
|
||||
x, sigma, unconditioning, conditioning)
|
||||
(
|
||||
unconditioned_next_x,
|
||||
conditioned_next_x,
|
||||
) = self._apply_standard_conditioning(
|
||||
x, sigma, unconditioning, conditioning
|
||||
)
|
||||
|
||||
combined_next_x = self._combine(unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale)
|
||||
combined_next_x = self._combine(
|
||||
unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale
|
||||
)
|
||||
|
||||
return combined_next_x
|
||||
|
||||
@@ -176,24 +253,33 @@ class InvokeAIDiffuserComponent:
|
||||
latents: torch.Tensor,
|
||||
sigma,
|
||||
step_index,
|
||||
total_step_count
|
||||
total_step_count,
|
||||
) -> torch.Tensor:
|
||||
if postprocessing_settings is not None:
|
||||
percent_through = self.calculate_percent_through(sigma, step_index, total_step_count)
|
||||
latents = self.apply_threshold(postprocessing_settings, latents, percent_through)
|
||||
latents = self.apply_symmetry(postprocessing_settings, latents, percent_through)
|
||||
percent_through = self.calculate_percent_through(
|
||||
sigma, step_index, total_step_count
|
||||
)
|
||||
latents = self.apply_threshold(
|
||||
postprocessing_settings, latents, percent_through
|
||||
)
|
||||
latents = self.apply_symmetry(
|
||||
postprocessing_settings, latents, percent_through
|
||||
)
|
||||
return latents
|
||||
|
||||
def calculate_percent_through(self, sigma, step_index, total_step_count):
|
||||
if step_index is not None and total_step_count is not None:
|
||||
# 🧨diffusers codepath
|
||||
percent_through = step_index / total_step_count # will never reach 1.0 - this is deliberate
|
||||
percent_through = (
|
||||
step_index / total_step_count
|
||||
) # will never reach 1.0 - this is deliberate
|
||||
else:
|
||||
# legacy compvis codepath
|
||||
# TODO remove when compvis codepath support is dropped
|
||||
if step_index is None and sigma is None:
|
||||
raise ValueError(
|
||||
f"Either step_index or sigma is required when doing cross attention control, but both are None.")
|
||||
f"Either step_index or sigma is required when doing cross attention control, but both are None."
|
||||
)
|
||||
percent_through = self.estimate_percent_through(step_index, sigma)
|
||||
return percent_through
|
||||
|
||||
@@ -204,24 +290,30 @@ class InvokeAIDiffuserComponent:
|
||||
x_twice = torch.cat([x] * 2)
|
||||
sigma_twice = torch.cat([sigma] * 2)
|
||||
both_conditionings = torch.cat([unconditioning, conditioning])
|
||||
both_results = self.model_forward_callback(x_twice, sigma_twice, both_conditionings)
|
||||
both_results = self.model_forward_callback(
|
||||
x_twice, sigma_twice, both_conditionings
|
||||
)
|
||||
unconditioned_next_x, conditioned_next_x = both_results.chunk(2)
|
||||
if conditioned_next_x.device.type == 'mps':
|
||||
if conditioned_next_x.device.type == "mps":
|
||||
# prevent a result filled with zeros. seems to be a torch bug.
|
||||
conditioned_next_x = conditioned_next_x.clone()
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
|
||||
def _apply_standard_conditioning_sequentially(self, x: torch.Tensor, sigma, unconditioning: torch.Tensor, conditioning: torch.Tensor):
|
||||
def _apply_standard_conditioning_sequentially(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning: torch.Tensor,
|
||||
conditioning: torch.Tensor,
|
||||
):
|
||||
# low-memory sequential path
|
||||
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning)
|
||||
conditioned_next_x = self.model_forward_callback(x, sigma, conditioning)
|
||||
if conditioned_next_x.device.type == 'mps':
|
||||
if conditioned_next_x.device.type == "mps":
|
||||
# prevent a result filled with zeros. seems to be a torch bug.
|
||||
conditioned_next_x = conditioned_next_x.clone()
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
|
||||
def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning):
|
||||
assert isinstance(conditioning, dict)
|
||||
assert isinstance(unconditioning, dict)
|
||||
@@ -236,48 +328,80 @@ class InvokeAIDiffuserComponent:
|
||||
]
|
||||
else:
|
||||
both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]])
|
||||
unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2)
|
||||
unconditioned_next_x, conditioned_next_x = self.model_forward_callback(
|
||||
x_twice, sigma_twice, both_conditionings
|
||||
).chunk(2)
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
|
||||
def _apply_cross_attention_controlled_conditioning(self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do):
|
||||
def _apply_cross_attention_controlled_conditioning(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
):
|
||||
if self.is_running_diffusers:
|
||||
return self._apply_cross_attention_controlled_conditioning__diffusers(x, sigma, unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do)
|
||||
return self._apply_cross_attention_controlled_conditioning__diffusers(
|
||||
x,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
)
|
||||
else:
|
||||
return self._apply_cross_attention_controlled_conditioning__compvis(x, sigma, unconditioning, conditioning,
|
||||
cross_attention_control_types_to_do)
|
||||
return self._apply_cross_attention_controlled_conditioning__compvis(
|
||||
x,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
)
|
||||
|
||||
def _apply_cross_attention_controlled_conditioning__diffusers(self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do):
|
||||
def _apply_cross_attention_controlled_conditioning__diffusers(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
):
|
||||
context: Context = self.cross_attention_control_context
|
||||
|
||||
cross_attn_processor_context = SwapCrossAttnContext(modified_text_embeddings=context.arguments.edited_conditioning,
|
||||
index_map=context.cross_attention_index_map,
|
||||
mask=context.cross_attention_mask,
|
||||
cross_attention_types_to_do=[])
|
||||
cross_attn_processor_context = SwapCrossAttnContext(
|
||||
modified_text_embeddings=context.arguments.edited_conditioning,
|
||||
index_map=context.cross_attention_index_map,
|
||||
mask=context.cross_attention_mask,
|
||||
cross_attention_types_to_do=[],
|
||||
)
|
||||
# no cross attention for unconditioning (negative prompt)
|
||||
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning,
|
||||
{"swap_cross_attn_context": cross_attn_processor_context})
|
||||
unconditioned_next_x = self.model_forward_callback(
|
||||
x,
|
||||
sigma,
|
||||
unconditioning,
|
||||
{"swap_cross_attn_context": cross_attn_processor_context},
|
||||
)
|
||||
|
||||
# do requested cross attention types for conditioning (positive prompt)
|
||||
cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do
|
||||
conditioned_next_x = self.model_forward_callback(x, sigma, conditioning,
|
||||
{"swap_cross_attn_context": cross_attn_processor_context})
|
||||
cross_attn_processor_context.cross_attention_types_to_do = (
|
||||
cross_attention_control_types_to_do
|
||||
)
|
||||
conditioned_next_x = self.model_forward_callback(
|
||||
x,
|
||||
sigma,
|
||||
conditioning,
|
||||
{"swap_cross_attn_context": cross_attn_processor_context},
|
||||
)
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
|
||||
def _apply_cross_attention_controlled_conditioning__compvis(self, x:torch.Tensor, sigma, unconditioning, conditioning, cross_attention_control_types_to_do):
|
||||
def _apply_cross_attention_controlled_conditioning__compvis(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma,
|
||||
unconditioning,
|
||||
conditioning,
|
||||
cross_attention_control_types_to_do,
|
||||
):
|
||||
# print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
|
||||
# slower non-batched path (20% slower on mac MPS)
|
||||
# We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
|
||||
@@ -287,24 +411,26 @@ class InvokeAIDiffuserComponent:
|
||||
# representing batched uncond + cond, but then when it comes to applying the saved attention, the
|
||||
# wrangler gets an attention tensor which only has shape[0]=8, representing just self.edited_conditionings.)
|
||||
# todo: give CrossAttentionControl's `wrangler` function more info so it can work with a batched call as well.
|
||||
context:Context = self.cross_attention_control_context
|
||||
context: Context = self.cross_attention_control_context
|
||||
|
||||
try:
|
||||
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning)
|
||||
|
||||
# process x using the original prompt, saving the attention maps
|
||||
#print("saving attention maps for", cross_attention_control_types_to_do)
|
||||
# print("saving attention maps for", cross_attention_control_types_to_do)
|
||||
for ca_type in cross_attention_control_types_to_do:
|
||||
context.request_save_attention_maps(ca_type)
|
||||
_ = self.model_forward_callback(x, sigma, conditioning)
|
||||
context.clear_requests(cleanup=False)
|
||||
|
||||
# process x again, using the saved attention maps to control where self.edited_conditioning will be applied
|
||||
#print("applying saved attention maps for", cross_attention_control_types_to_do)
|
||||
# print("applying saved attention maps for", cross_attention_control_types_to_do)
|
||||
for ca_type in cross_attention_control_types_to_do:
|
||||
context.request_apply_saved_attention_maps(ca_type)
|
||||
edited_conditioning = self.conditioning.cross_attention_control_args.edited_conditioning
|
||||
conditioned_next_x = self.model_forward_callback(x, sigma, edited_conditioning)
|
||||
edited_conditioning = context.arguments.edited_conditioning
|
||||
conditioned_next_x = self.model_forward_callback(
|
||||
x, sigma, edited_conditioning
|
||||
)
|
||||
context.clear_requests(cleanup=True)
|
||||
|
||||
except:
|
||||
@@ -323,17 +449,21 @@ class InvokeAIDiffuserComponent:
|
||||
self,
|
||||
postprocessing_settings: PostprocessingSettings,
|
||||
latents: torch.Tensor,
|
||||
percent_through: float
|
||||
percent_through: float,
|
||||
) -> torch.Tensor:
|
||||
|
||||
if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0:
|
||||
if (
|
||||
postprocessing_settings.threshold is None
|
||||
or postprocessing_settings.threshold == 0.0
|
||||
):
|
||||
return latents
|
||||
|
||||
threshold = postprocessing_settings.threshold
|
||||
warmup = postprocessing_settings.warmup
|
||||
|
||||
if percent_through < warmup:
|
||||
current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup))
|
||||
current_threshold = threshold + threshold * 5 * (
|
||||
1 - (percent_through / warmup)
|
||||
)
|
||||
else:
|
||||
current_threshold = threshold
|
||||
|
||||
@@ -347,10 +477,14 @@ class InvokeAIDiffuserComponent:
|
||||
|
||||
if self.debug_thresholding:
|
||||
std, mean = [i.item() for i in torch.std_mean(latents)]
|
||||
outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold))
|
||||
print(f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n"
|
||||
f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n"
|
||||
f" | {outside / latents.numel() * 100:.2f}% values outside threshold")
|
||||
outside = torch.count_nonzero(
|
||||
(latents < -current_threshold) | (latents > current_threshold)
|
||||
)
|
||||
print(
|
||||
f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n"
|
||||
f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n"
|
||||
f" | {outside / latents.numel() * 100:.2f}% values outside threshold"
|
||||
)
|
||||
|
||||
if maxval < current_threshold and minval > -current_threshold:
|
||||
return latents
|
||||
@@ -363,17 +497,23 @@ class InvokeAIDiffuserComponent:
|
||||
latents = torch.clone(latents)
|
||||
maxval = np.clip(maxval * scale, 1, current_threshold)
|
||||
num_altered += torch.count_nonzero(latents > maxval)
|
||||
latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval
|
||||
latents[latents > maxval] = (
|
||||
torch.rand_like(latents[latents > maxval]) * maxval
|
||||
)
|
||||
|
||||
if minval < -current_threshold:
|
||||
latents = torch.clone(latents)
|
||||
minval = np.clip(minval * scale, -current_threshold, -1)
|
||||
num_altered += torch.count_nonzero(latents < minval)
|
||||
latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval
|
||||
latents[latents < minval] = (
|
||||
torch.rand_like(latents[latents < minval]) * minval
|
||||
)
|
||||
|
||||
if self.debug_thresholding:
|
||||
print(f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n"
|
||||
f" | {num_altered / latents.numel() * 100:.2f}% values altered")
|
||||
print(
|
||||
f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n"
|
||||
f" | {num_altered / latents.numel() * 100:.2f}% values altered"
|
||||
)
|
||||
|
||||
return latents
|
||||
|
||||
@@ -381,9 +521,8 @@ class InvokeAIDiffuserComponent:
|
||||
self,
|
||||
postprocessing_settings: PostprocessingSettings,
|
||||
latents: torch.Tensor,
|
||||
percent_through: float
|
||||
percent_through: float,
|
||||
) -> torch.Tensor:
|
||||
|
||||
# Reset our last percent through if this is our first step.
|
||||
if percent_through == 0.0:
|
||||
self.last_percent_through = 0.0
|
||||
@@ -393,36 +532,52 @@ class InvokeAIDiffuserComponent:
|
||||
|
||||
# Check for out of bounds
|
||||
h_symmetry_time_pct = postprocessing_settings.h_symmetry_time_pct
|
||||
if (h_symmetry_time_pct is not None and (h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0)):
|
||||
if h_symmetry_time_pct is not None and (
|
||||
h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0
|
||||
):
|
||||
h_symmetry_time_pct = None
|
||||
|
||||
v_symmetry_time_pct = postprocessing_settings.v_symmetry_time_pct
|
||||
if (v_symmetry_time_pct is not None and (v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0)):
|
||||
if v_symmetry_time_pct is not None and (
|
||||
v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0
|
||||
):
|
||||
v_symmetry_time_pct = None
|
||||
|
||||
dev = latents.device.type
|
||||
|
||||
latents.to(device='cpu')
|
||||
latents.to(device="cpu")
|
||||
|
||||
if (
|
||||
h_symmetry_time_pct != None and
|
||||
self.last_percent_through < h_symmetry_time_pct and
|
||||
percent_through >= h_symmetry_time_pct
|
||||
h_symmetry_time_pct != None
|
||||
and self.last_percent_through < h_symmetry_time_pct
|
||||
and percent_through >= h_symmetry_time_pct
|
||||
):
|
||||
# Horizontal symmetry occurs on the 3rd dimension of the latent
|
||||
width = latents.shape[3]
|
||||
x_flipped = torch.flip(latents, dims=[3])
|
||||
latents = torch.cat([latents[:, :, :, 0:int(width/2)], x_flipped[:, :, :, int(width/2):int(width)]], dim=3)
|
||||
latents = torch.cat(
|
||||
[
|
||||
latents[:, :, :, 0 : int(width / 2)],
|
||||
x_flipped[:, :, :, int(width / 2) : int(width)],
|
||||
],
|
||||
dim=3,
|
||||
)
|
||||
|
||||
if (
|
||||
v_symmetry_time_pct != None and
|
||||
self.last_percent_through < v_symmetry_time_pct and
|
||||
percent_through >= v_symmetry_time_pct
|
||||
v_symmetry_time_pct != None
|
||||
and self.last_percent_through < v_symmetry_time_pct
|
||||
and percent_through >= v_symmetry_time_pct
|
||||
):
|
||||
# Vertical symmetry occurs on the 2nd dimension of the latent
|
||||
height = latents.shape[2]
|
||||
y_flipped = torch.flip(latents, dims=[2])
|
||||
latents = torch.cat([latents[:, :, 0:int(height / 2)], y_flipped[:, :, int(height / 2):int(height)]], dim=2)
|
||||
latents = torch.cat(
|
||||
[
|
||||
latents[:, :, 0 : int(height / 2)],
|
||||
y_flipped[:, :, int(height / 2) : int(height)],
|
||||
],
|
||||
dim=2,
|
||||
)
|
||||
|
||||
self.last_percent_through = percent_through
|
||||
return latents.to(device=dev)
|
||||
@@ -430,7 +585,9 @@ class InvokeAIDiffuserComponent:
|
||||
def estimate_percent_through(self, step_index, sigma):
|
||||
if step_index is not None and self.cross_attention_control_context is not None:
|
||||
# percent_through will never reach 1.0 (but this is intended)
|
||||
return float(step_index) / float(self.cross_attention_control_context.step_count)
|
||||
return float(step_index) / float(
|
||||
self.cross_attention_control_context.step_count
|
||||
)
|
||||
# find the best possible index of the current sigma in the sigma sequence
|
||||
smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma)
|
||||
sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0
|
||||
@@ -439,33 +596,38 @@ class InvokeAIDiffuserComponent:
|
||||
return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0])
|
||||
# print('estimated percent_through', percent_through, 'from sigma', sigma.item())
|
||||
|
||||
|
||||
# todo: make this work
|
||||
@classmethod
|
||||
def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale):
|
||||
def apply_conjunction(
|
||||
cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale
|
||||
):
|
||||
x_in = torch.cat([x] * 2)
|
||||
t_in = torch.cat([t] * 2) # aka sigmas
|
||||
t_in = torch.cat([t] * 2) # aka sigmas
|
||||
|
||||
deltas = None
|
||||
uncond_latents = None
|
||||
weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)]
|
||||
weighted_cond_list = (
|
||||
c_or_weighted_c_list
|
||||
if type(c_or_weighted_c_list) is list
|
||||
else [(c_or_weighted_c_list, 1)]
|
||||
)
|
||||
|
||||
# below is fugly omg
|
||||
num_actual_conditionings = len(c_or_weighted_c_list)
|
||||
conditionings = [uc] + [c for c,weight in weighted_cond_list]
|
||||
weights = [1] + [weight for c,weight in weighted_cond_list]
|
||||
chunk_count = ceil(len(conditionings)/2)
|
||||
conditionings = [uc] + [c for c, weight in weighted_cond_list]
|
||||
weights = [1] + [weight for c, weight in weighted_cond_list]
|
||||
chunk_count = ceil(len(conditionings) / 2)
|
||||
deltas = None
|
||||
for chunk_index in range(chunk_count):
|
||||
offset = chunk_index*2
|
||||
chunk_size = min(2, len(conditionings)-offset)
|
||||
offset = chunk_index * 2
|
||||
chunk_size = min(2, len(conditionings) - offset)
|
||||
|
||||
if chunk_size == 1:
|
||||
c_in = conditionings[offset]
|
||||
latents_a = forward_func(x_in[:-1], t_in[:-1], c_in)
|
||||
latents_b = None
|
||||
else:
|
||||
c_in = torch.cat(conditionings[offset:offset+2])
|
||||
c_in = torch.cat(conditionings[offset : offset + 2])
|
||||
latents_a, latents_b = forward_func(x_in, t_in, c_in).chunk(2)
|
||||
|
||||
# first chunk is guaranteed to be 2 entries: uncond_latents + first conditioining
|
||||
@@ -478,11 +640,15 @@ class InvokeAIDiffuserComponent:
|
||||
deltas = torch.cat((deltas, latents_b - uncond_latents))
|
||||
|
||||
# merge the weighted deltas together into a single merged delta
|
||||
per_delta_weights = torch.tensor(weights[1:], dtype=deltas.dtype, device=deltas.device)
|
||||
per_delta_weights = torch.tensor(
|
||||
weights[1:], dtype=deltas.dtype, device=deltas.device
|
||||
)
|
||||
normalize = False
|
||||
if normalize:
|
||||
per_delta_weights /= torch.sum(per_delta_weights)
|
||||
reshaped_weights = per_delta_weights.reshape(per_delta_weights.shape + (1, 1, 1))
|
||||
reshaped_weights = per_delta_weights.reshape(
|
||||
per_delta_weights.shape + (1, 1, 1)
|
||||
)
|
||||
deltas_merged = torch.sum(deltas * reshaped_weights, dim=0, keepdim=True)
|
||||
|
||||
# old_return_value = super().forward(x, sigma, uncond, cond, cond_scale)
|
||||
|
||||
@@ -463,6 +463,9 @@ class FrozenCLIPEmbedder(AbstractEncoder):
|
||||
def encode(self, text, **kwargs):
|
||||
return self(text, **kwargs)
|
||||
|
||||
def set_textual_inversion_manager(self, manager): #TextualInversionManager):
|
||||
self.textual_inversion_manager = manager
|
||||
|
||||
@property
|
||||
def device(self):
|
||||
return self.transformer.device
|
||||
@@ -476,10 +479,6 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder):
|
||||
fragment_weights_key = "fragment_weights"
|
||||
return_tokens_key = "return_tokens"
|
||||
|
||||
def set_textual_inversion_manager(self, manager): #TextualInversionManager):
|
||||
# TODO all of the weighting and expanding stuff needs be moved out of this class
|
||||
self.textual_inversion_manager = manager
|
||||
|
||||
def forward(self, text: list, **kwargs):
|
||||
# TODO all of the weighting and expanding stuff needs be moved out of this class
|
||||
'''
|
||||
|
||||
ldm/modules/kohya_lora_manager.py (new file, 367 lines)
@@ -0,0 +1,367 @@
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from compel import Compel
|
||||
from diffusers.models import UNet2DConditionModel
|
||||
from safetensors.torch import load_file
|
||||
from torch.utils.hooks import RemovableHandle
|
||||
from transformers import CLIPTextModel
|
||||
|
||||
from ldm.invoke.devices import choose_torch_device
|
||||
|
||||
"""
|
||||
This module supports loading LoRA weights trained with https://github.com/kohya-ss/sd-scripts
|
||||
To be removed once support for diffusers LoRA weights is well supported
|
||||
"""
|
||||
|
||||
|
||||
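The LoRALayer class that follows applies the standard low-rank update at forward time: the wrapped module's output is augmented by up(down(x)), scaled by multiplier * (alpha / rank). The snippet below is a small numeric sketch of that formula with illustrative shapes; it is not part of the new file.

import torch

rank, features = 4, 320
alpha, multiplier = 4.0, 1.0

down = torch.nn.Linear(features, rank, bias=False)   # stands in for lora_down.weight
up = torch.nn.Linear(rank, features, bias=False)     # stands in for lora_up.weight
scale = alpha / rank if (alpha and rank) else 1.0    # same rule as LoRALayer.__init__

x = torch.randn(2, features)
base_output = torch.randn(2, features)               # stands in for the wrapped module's output
patched_output = base_output + up(down(x)) * multiplier * scale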
class LoRALayer:
|
||||
lora_name: str
|
||||
name: str
|
||||
scale: float
|
||||
|
||||
up: torch.nn.Module
|
||||
mid: Optional[torch.nn.Module] = None
|
||||
down: torch.nn.Module
|
||||
|
||||
def __init__(self, lora_name: str, name: str, rank=4, alpha=1.0):
|
||||
self.lora_name = lora_name
|
||||
self.name = name
|
||||
self.scale = alpha / rank if (alpha and rank) else 1.0
|
||||
|
||||
def forward(self, lora, input_h, output):
|
||||
if self.mid is None:
|
||||
output = (
|
||||
output
|
||||
+ self.up(self.down(*input_h)) * lora.multiplier * self.scale
|
||||
)
|
||||
else:
|
||||
output = (
|
||||
output
|
||||
+ self.up(self.mid(self.down(*input_h))) * lora.multiplier * self.scale
|
||||
)
|
||||
return output
|
||||
|
||||
class LoHALayer:
|
||||
lora_name: str
|
||||
name: str
|
||||
scale: float
|
||||
|
||||
w1_a: torch.Tensor
|
||||
w1_b: torch.Tensor
|
||||
w2_a: torch.Tensor
|
||||
w2_b: torch.Tensor
|
||||
t1: Optional[torch.Tensor] = None
|
||||
t2: Optional[torch.Tensor] = None
|
||||
bias: Optional[torch.Tensor] = None
|
||||
|
||||
org_module: torch.nn.Module
|
||||
|
||||
def __init__(self, lora_name: str, name: str, rank=4, alpha=1.0):
|
||||
self.lora_name = lora_name
|
||||
self.name = name
|
||||
self.scale = alpha / rank if (alpha and rank) else 1.0
|
||||
|
||||
def forward(self, lora, input_h, output):
|
||||
|
||||
if type(self.org_module) == torch.nn.Conv2d:
|
||||
op = torch.nn.functional.conv2d
|
||||
extra_args = dict(
|
||||
stride=self.org_module.stride,
|
||||
padding=self.org_module.padding,
|
||||
dilation=self.org_module.dilation,
|
||||
groups=self.org_module.groups,
|
||||
)
|
||||
|
||||
else:
|
||||
op = torch.nn.functional.linear
|
||||
extra_args = {}
|
||||
|
||||
if self.t1 is None:
|
||||
weight = ((self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b))
|
||||
|
||||
else:
|
||||
rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', self.t1, self.w1_b, self.w1_a)
|
||||
rebuild2 = torch.einsum('i j k l, j r, i p -> p r k l', self.t2, self.w2_b, self.w2_a)
|
||||
weight = rebuild1 * rebuild2
|
||||
|
||||
bias = self.bias if self.bias is not None else 0
|
||||
return output + op(
|
||||
*input_h,
|
||||
(weight + bias).view(self.org_module.weight.shape),
|
||||
None,
|
||||
**extra_args,
|
||||
) * lora.multiplier * self.scale
|
||||
|
||||
|
||||
class LoRAModuleWrapper:
|
||||
unet: UNet2DConditionModel
|
||||
text_encoder: CLIPTextModel
|
||||
hooks: list[RemovableHandle]
|
||||
|
||||
def __init__(self, unet, text_encoder):
|
||||
self.unet = unet
|
||||
self.text_encoder = text_encoder
|
||||
self.hooks = []
|
||||
self.text_modules = None
|
||||
self.unet_modules = None
|
||||
|
||||
self.applied_loras = {}
|
||||
self.loaded_loras = {}
|
||||
|
||||
self.UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention", "ResnetBlock2D", "Downsample2D", "Upsample2D", "SpatialTransformer"]
|
||||
self.TEXT_ENCODER_TARGET_REPLACE_MODULE = ["ResidualAttentionBlock", "CLIPAttention", "CLIPMLP"]
|
||||
self.LORA_PREFIX_UNET = "lora_unet"
|
||||
self.LORA_PREFIX_TEXT_ENCODER = "lora_te"
|
||||
|
||||
|
||||
def find_modules(
|
||||
prefix, root_module: torch.nn.Module, target_replace_modules
|
||||
) -> dict[str, torch.nn.Module]:
|
||||
mapping = {}
|
||||
for name, module in root_module.named_modules():
|
||||
if module.__class__.__name__ in target_replace_modules:
|
||||
for child_name, child_module in module.named_modules():
|
||||
layer_type = child_module.__class__.__name__
|
||||
if layer_type == "Linear" or (
|
||||
layer_type == "Conv2d"
|
||||
and child_module.kernel_size in [(1, 1), (3, 3)]
|
||||
):
|
||||
lora_name = prefix + "." + name + "." + child_name
|
||||
lora_name = lora_name.replace(".", "_")
|
||||
mapping[lora_name] = child_module
|
||||
self.apply_module_forward(child_module, lora_name)
|
||||
return mapping
|
||||
|
||||
if self.text_modules is None:
|
||||
self.text_modules = find_modules(
|
||||
self.LORA_PREFIX_TEXT_ENCODER,
|
||||
text_encoder,
|
||||
self.TEXT_ENCODER_TARGET_REPLACE_MODULE,
|
||||
)
|
||||
|
||||
if self.unet_modules is None:
|
||||
self.unet_modules = find_modules(
|
||||
self.LORA_PREFIX_UNET, unet, self.UNET_TARGET_REPLACE_MODULE
|
||||
)
|
||||
|
||||
|
||||
def lora_forward_hook(self, name):
|
||||
wrapper = self
|
||||
|
||||
def lora_forward(module, input_h, output):
|
||||
if len(wrapper.loaded_loras) == 0:
|
||||
return output
|
||||
|
||||
for lora in wrapper.applied_loras.values():
|
||||
layer = lora.layers.get(name, None)
|
||||
if layer is None:
|
||||
continue
|
||||
output = layer.forward(lora, input_h, output)
|
||||
return output
|
||||
|
||||
return lora_forward
|
||||
|
||||
def apply_module_forward(self, module, name):
|
||||
handle = module.register_forward_hook(self.lora_forward_hook(name))
|
||||
self.hooks.append(handle)
|
||||
|
||||
def clear_hooks(self):
|
||||
for hook in self.hooks:
|
||||
hook.remove()
|
||||
|
||||
self.hooks.clear()
|
||||
|
||||
def clear_applied_loras(self):
|
||||
self.applied_loras.clear()
|
||||
|
||||
def clear_loaded_loras(self):
|
||||
self.loaded_loras.clear()
|
||||
|
||||
class LoRA:
|
||||
name: str
|
||||
layers: dict[str, LoRALayer]
|
||||
device: torch.device
|
||||
dtype: torch.dtype
|
||||
wrapper: LoRAModuleWrapper
|
||||
multiplier: float
|
||||
|
||||
def __init__(self, name: str, device, dtype, wrapper, multiplier=1.0):
|
||||
self.name = name
|
||||
self.layers = {}
|
||||
self.multiplier = multiplier
|
||||
self.device = device
|
||||
self.dtype = dtype
|
||||
self.wrapper = wrapper
|
||||
|
||||
def load_from_dict(self, state_dict):
|
||||
state_dict_groupped = dict()
|
||||
|
||||
for key, value in state_dict.items():
|
||||
stem, leaf = key.split(".", 1)
|
||||
if stem not in state_dict_groupped:
|
||||
state_dict_groupped[stem] = dict()
|
||||
state_dict_groupped[stem][leaf] = value
|
||||
|
||||
|
||||
for stem, values in state_dict_groupped.items():
|
||||
if stem.startswith(self.wrapper.LORA_PREFIX_TEXT_ENCODER):
|
||||
wrapped = self.wrapper.text_modules.get(stem, None)
|
||||
elif stem.startswith(self.wrapper.LORA_PREFIX_UNET):
|
||||
wrapped = self.wrapper.unet_modules.get(stem, None)
|
||||
else:
|
||||
continue
|
||||
|
||||
if wrapped is None:
|
||||
print(f">> Missing layer: {stem}")
|
||||
continue
|
||||
|
||||
# TODO: diff key
|
||||
|
||||
bias = None
|
||||
alpha = None
|
||||
|
||||
if "alpha" in values:
|
||||
alpha = values["alpha"].item()
|
||||
|
||||
if "bias_indices" in values and "bias_values" in values and "bias_size" in values:
|
||||
bias = torch.sparse_coo_tensor(
|
||||
values["bias_indices"],
|
||||
values["bias_values"],
|
||||
tuple(values["bias_size"]),
|
||||
).to(device=self.device, dtype=self.dtype)
|
||||
|
||||
|
||||
# lora and locon
|
||||
if "lora_down.weight" in values:
|
||||
value_down = values["lora_down.weight"]
|
||||
value_mid = values.get("lora_mid.weight", None)
|
||||
value_up = values["lora_up.weight"]
|
||||
|
||||
if type(wrapped) == torch.nn.Conv2d:
|
||||
if value_mid is not None:
|
||||
layer_down = torch.nn.Conv2d(value_down.shape[1], value_down.shape[0], (1, 1), bias=False)
|
||||
layer_mid = torch.nn.Conv2d(value_mid.shape[1], value_mid.shape[0], wrapped.kernel_size, wrapped.stride, wrapped.padding, bias=False)
|
||||
else:
|
||||
layer_down = torch.nn.Conv2d(value_down.shape[1], value_down.shape[0], wrapped.kernel_size, wrapped.stride, wrapped.padding, bias=False)
|
||||
layer_mid = None
|
||||
|
||||
layer_up = torch.nn.Conv2d(value_up.shape[1], value_up.shape[0], (1, 1), bias=False)
|
||||
|
||||
elif type(wrapped) == torch.nn.Linear:
|
||||
layer_down = torch.nn.Linear(value_down.shape[1], value_down.shape[0], bias=False)
|
||||
layer_mid = None
|
||||
layer_up = torch.nn.Linear(value_up.shape[1], value_up.shape[0], bias=False)
|
||||
|
||||
else:
|
||||
print(
|
||||
f">> Encountered unknown lora layer module in {self.name}: {stem} - {type(wrapped).__name__}"
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
with torch.no_grad():
|
||||
layer_down.weight.copy_(value_down)
|
||||
if layer_mid is not None:
|
||||
layer_mid.weight.copy_(value_mid)
|
||||
layer_up.weight.copy_(value_up)
|
||||
|
||||
|
||||
layer_down.to(device=self.device, dtype=self.dtype)
|
||||
if layer_mid is not None:
|
||||
layer_mid.to(device=self.device, dtype=self.dtype)
|
||||
layer_up.to(device=self.device, dtype=self.dtype)
|
||||
|
||||
|
||||
rank = value_down.shape[0]
|
||||
|
||||
layer = LoRALayer(self.name, stem, rank, alpha)
|
||||
#layer.bias = bias # TODO: find and debug lora/locon with bias
|
||||
layer.down = layer_down
|
||||
layer.mid = layer_mid
|
||||
layer.up = layer_up
|
||||
|
||||
# loha
|
||||
elif "hada_w1_b" in values:
|
||||
|
||||
rank = values["hada_w1_b"].shape[0]
|
||||
|
||||
layer = LoHALayer(self.name, stem, rank, alpha)
|
||||
layer.org_module = wrapped
|
||||
layer.bias = bias
|
||||
|
||||
layer.w1_a = values["hada_w1_a"].to(device=self.device, dtype=self.dtype)
|
||||
layer.w1_b = values["hada_w1_b"].to(device=self.device, dtype=self.dtype)
|
||||
layer.w2_a = values["hada_w2_a"].to(device=self.device, dtype=self.dtype)
|
||||
layer.w2_b = values["hada_w2_b"].to(device=self.device, dtype=self.dtype)
|
||||
|
||||
if "hada_t1" in values:
|
||||
layer.t1 = values["hada_t1"].to(device=self.device, dtype=self.dtype)
|
||||
else:
|
||||
layer.t1 = None
|
||||
|
||||
if "hada_t2" in values:
|
||||
layer.t2 = values["hada_t2"].to(device=self.device, dtype=self.dtype)
|
||||
else:
|
||||
layer.t2 = None
|
||||
|
||||
else:
|
||||
print(
|
||||
f">> Encountered unknown lora layer module in {self.name}: {stem} - {type(wrapped).__name__}"
|
||||
)
|
||||
return
|
||||
|
||||
self.layers[stem] = layer
|
||||
|
||||
|
||||
class KohyaLoraManager:
|
||||
def __init__(self, pipe, lora_path):
|
||||
self.unet = pipe.unet
|
||||
self.lora_path = lora_path
|
||||
self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
|
||||
self.text_encoder = pipe.text_encoder
|
||||
self.device = torch.device(choose_torch_device())
|
||||
self.dtype = pipe.unet.dtype
|
||||
|
||||
def load_lora_module(self, name, path_file, multiplier: float = 1.0):
|
||||
print(f" | Found lora {name} at {path_file}")
|
||||
if path_file.suffix == ".safetensors":
|
||||
checkpoint = load_file(path_file.absolute().as_posix(), device="cpu")
|
||||
else:
|
||||
checkpoint = torch.load(path_file, map_location="cpu")
|
||||
|
||||
lora = LoRA(name, self.device, self.dtype, self.wrapper, multiplier)
|
||||
lora.load_from_dict(checkpoint)
|
||||
self.wrapper.loaded_loras[name] = lora
|
||||
|
||||
return lora
|
||||
|
||||
def apply_lora_model(self, name, mult: float = 1.0):
|
||||
for suffix in ["ckpt", "safetensors", "pt"]:
|
||||
path_file = Path(self.lora_path, f"{name}.{suffix}")
|
||||
if path_file.exists():
|
||||
print(f" | Loading lora {path_file.name} with weight {mult}")
|
||||
break
|
||||
if not path_file.exists():
|
||||
print(f" ** Unable to find lora: {name}")
|
||||
return
|
||||
|
||||
lora = self.wrapper.loaded_loras.get(name, None)
|
||||
if lora is None:
|
||||
lora = self.load_lora_module(name, path_file, mult)
|
||||
|
||||
lora.multiplier = mult
|
||||
self.wrapper.applied_loras[name] = lora
|
||||
|
||||
def unload_applied_lora(self, lora_name: str):
|
||||
if lora_name in self.wrapper.applied_loras:
|
||||
del self.wrapper.applied_loras[lora_name]
|
||||
|
||||
def unload_lora(self, lora_name: str):
|
||||
if lora_name in self.wrapper.loaded_loras:
|
||||
del self.wrapper.loaded_loras[lora_name]
|
||||
|
||||
def clear_loras(self):
|
||||
self.wrapper.clear_applied_loras()
|
||||
ldm/modules/lora_manager.py (new file, 66 lines)
@@ -0,0 +1,66 @@
import os
from pathlib import Path
from ldm.invoke.globals import global_lora_models_dir
from .kohya_lora_manager import KohyaLoraManager
from typing import Optional, Dict

class LoraCondition:
    name: str
    weight: float

    def __init__(self, name, weight: float = 1.0, kohya_manager: Optional[KohyaLoraManager]=None):
        self.name = name
        self.weight = weight
        self.kohya_manager = kohya_manager

    def __call__(self, model):
        # TODO: make model able to load from huggingface, rather then just local files
        path = Path(global_lora_models_dir(), self.name)
        if path.is_dir():
            if model.load_attn_procs:
                file = Path(path, "pytorch_lora_weights.bin")
                if file.is_file():
                    print(f">> Loading LoRA: {path}")
                    model.load_attn_procs(path.absolute().as_posix())
                else:
                    print(f" ** Unable to find valid LoRA at: {path}")
            else:
                print(" ** Invalid Model to load LoRA")
        elif self.kohya_manager:
            self.kohya_manager.apply_lora_model(self.name,self.weight)
        else:
            print(" ** Unable to load LoRA")

    def unload(self):
        if self.kohya_manager:
            print(f'>> unloading LoRA {self.name}')
            self.kohya_manager.unload_applied_lora(self.name)

class LoraManager:
    def __init__(self, pipe):
        # Kohya class handles lora not generated through diffusers
        self.kohya = KohyaLoraManager(pipe, global_lora_models_dir())

    def set_loras_conditions(self, lora_weights: list):
        conditions = []
        if len(lora_weights) > 0:
            for lora in lora_weights:
                conditions.append(LoraCondition(lora.model, lora.weight, self.kohya))

        if len(conditions) > 0:
            return conditions

        return None

    @classmethod
    def list_loras(self)->Dict[str, Path]:
        path = Path(global_lora_models_dir())
        models_found = dict()
        for root,_,files in os.walk(path):
            for x in files:
                name = Path(x).stem
                suffix = Path(x).suffix
                if suffix in [".ckpt", ".pt", ".safetensors"]:
                    models_found[name]=Path(root,x)
        return models_found
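A short sketch of how the new module is meant to be driven, assuming only what the code above requires: the objects passed to set_loras_conditions() need .model and .weight attributes, so LoraWeight below is a hypothetical stand-in for whatever the caller actually uses, and "pipe" is an already-loaded pipeline.

# Sketch under assumptions: LoraWeight is illustrative; only .model and .weight are read by set_loras_conditions().
from dataclasses import dataclass
from ldm.modules.lora_manager import LoraManager

@dataclass
class LoraWeight:
    model: str
    weight: float

def lora_conditions_for(pipe, requests: list[tuple[str, float]]):
    manager = LoraManager(pipe)
    wanted = [LoraWeight(name, weight) for name, weight in requests]
    return manager.set_loras_conditions(wanted)   # list of LoraCondition callables, or None if empty

LoraManager.list_loras() walks the LoRA models directory and maps each .ckpt/.pt/.safetensors stem to its path, which is handy for validating requested names before building conditions.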
ldm/modules/peft_manager.py (new file, 95 lines)
@@ -0,0 +1,95 @@
from peft import LoraModel, LoraConfig, set_peft_model_state_dict
import torch
import json
from pathlib import Path
from ldm.invoke.globals import global_lora_models_dir

class LoraPeftModule:
    def __init__(self, lora_dir, multiplier: float = 1.0):
        self.lora_dir = lora_dir
        self.multiplier = multiplier
        self.config = self.load_config()
        self.checkpoint = self.load_checkpoint()

    def load_config(self):
        lora_config_file = Path(self.lora_dir, f'lora_config.json')
        with open(lora_config_file, "r") as f:
            return json.load(f)

    def load_checkpoint(self):
        return torch.load(Path(self.lora_dir, f'lora.pt'))

    def unet(self, text_encoder):
        lora_ds = {
            k.replace("text_encoder_", ""): v for k, v in self.checkpoint.items() if "text_encoder_" in k
        }
        config = LoraConfig(**self.config["peft_config"])
        model = LoraModel(config, text_encoder)
        set_peft_model_state_dict(model, lora_ds)
        return model

    def text_encoder(self, unet):
        lora_ds = {
            k: v for k, v in self.checkpoint.items() if "text_encoder_" not in k
        }
        config = LoraConfig(**self.config["text_encoder_peft_config"])
        model = LoraModel(config, unet)
        set_peft_model_state_dict(model, lora_ds)
        return model

    def apply(self, pipe, dtype):
        pipe.unet = self.unet(pipe.unet)
        if "text_encoder_peft_config" in self.config:
            pipe.text_encoder = self.text_encoder(pipe.text_encoder)

        if dtype in (torch.float16, torch.bfloat16):
            pipe.unet.half()
            pipe.text_encoder.half()

        return pipe


class PeftManager:
    modules: list[LoraPeftModule]

    def __init__(self):
        self.lora_path = global_lora_models_dir()
        self.modules = []

    def set_loras(self, lora_weights: list):
        if len(lora_weights) > 0:
            for lora in lora_weights:
                self.add(lora.model, lora.weight)

    def add(self, name, multiplier: float = 1.0):
        lora_dir = Path(self.lora_path, name)

        if lora_dir.exists():
            lora_config_file = Path(lora_dir, f'lora_config.json')
            lora_checkpoint = Path(lora_dir, f'lora.pt')

            if lora_config_file.exists() and lora_checkpoint.exists():
                self.modules.append(LoraPeftModule(lora_dir, multiplier))
                return

        print(f">> Failed to load lora {name}")

    def load(self, pipe, dtype):
        if len(self.modules) > 0:
            for module in self.modules:
                pipe = module.apply(pipe, dtype)

        return pipe

    # Simple check to allow previous functionality
    def should_use(self, lora_weights: list):
        if len(lora_weights) > 0:
            for lora in lora_weights:
                lora_dir = Path(self.lora_path, lora.model)
                if lora_dir.exists():
                    lora_config_file = Path(lora_dir, f'lora_config.json')
                    lora_checkpoint = Path(lora_dir, f'lora.pt')
                    if lora_config_file.exists() and lora_checkpoint.exists():
                        return False

        return True
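A minimal sketch of the PEFT path above, assuming a folder <lora models dir>/my_peft_lora/ containing the two files PeftManager.add() checks for (lora_config.json and lora.pt); the folder name and the dtype default are illustrative, and "pipe" is a pre-built pipeline.

# Sketch under assumptions: "my_peft_lora" is a hypothetical folder under the configured LoRA models directory.
import torch
from ldm.modules.peft_manager import PeftManager

def apply_peft_loras(pipe, dtype=torch.float16):
    peft = PeftManager()
    peft.add("my_peft_lora", multiplier=1.0)   # prints ">> Failed to load lora ..." if config or checkpoint is missing
    return peft.load(pipe, dtype)              # wraps the unet (and text encoder, if configured) in PEFT LoraModel layers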
@@ -1,9 +1,9 @@
import os
import traceback
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Union

import safetensors.torch
import torch
from picklescan.scanner import scan_file_path
from transformers import CLIPTextModel, CLIPTokenizer
@@ -71,21 +71,6 @@ class TextualInversionManager(BaseTextualInversionManager):

        if str(ckpt_path).endswith(".DS_Store"):
            return

        try:
            scan_result = scan_file_path(str(ckpt_path))
            if scan_result.infected_files == 1:
                print(
                    f"\n### Security Issues Found in Model: {scan_result.issues_count}"
                )
                print("### For your safety, InvokeAI will not load this embed.")
                return
        except Exception:
            print(
                f"### {ckpt_path.parents[0].name}/{ckpt_path.name} is damaged or corrupt."
            )
            return

        embedding_info = self._parse_embedding(str(ckpt_path))

        if embedding_info is None:
@@ -96,7 +81,7 @@ class TextualInversionManager(BaseTextualInversionManager):
            != embedding_info["token_dim"]
        ):
            print(
                f"** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info['token_dim']}."
                f" ** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info['token_dim']}."
            )
            return
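The picklescan check that moves in this hunk can be illustrated standalone; this sketch assumes only the behaviour already visible above (scan_file_path returning an object with infected_files and issues_count), and the helper name is hypothetical.

# Sketch of the pre-load safety gate used above; is_safe_to_load is an illustrative wrapper, not project code.
from picklescan.scanner import scan_file_path

def is_safe_to_load(path: str) -> bool:
    scan_result = scan_file_path(path)
    if scan_result.infected_files == 1:
        print(f"### Security Issues Found in Model: {scan_result.issues_count}")
        print("### For your safety, InvokeAI will not load this embed.")
        return False
    return True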
@@ -309,92 +294,72 @@ class TextualInversionManager(BaseTextualInversionManager):

        return token_id

    def _parse_embedding(self, embedding_file: str):
        file_type = embedding_file.split(".")[-1]
        if file_type == "pt":
            return self._parse_embedding_pt(embedding_file)
        elif file_type == "bin":
            return self._parse_embedding_bin(embedding_file)
        else:
            print(f"** Notice: unrecognized embedding file format: {embedding_file}")
    def _parse_embedding(self, embedding_file: str)->dict:
        suffix = Path(embedding_file).suffix
        try:
            if suffix in [".pt",".ckpt",".bin"]:
                scan_result = scan_file_path(embedding_file)
                if scan_result.infected_files == 1:
                    print(
                        f" ** Security Issues Found in Model: {scan_result.issues_count}"
                    )
                    print(" ** For your safety, InvokeAI will not load this embed.")
                    return
                ckpt = torch.load(embedding_file,map_location="cpu")
            else:
                ckpt = safetensors.torch.load_file(embedding_file)
        except Exception as e:
            print(f" ** Notice: unrecognized embedding file format: {embedding_file}: {e}")
            return None

    def _parse_embedding_pt(self, embedding_file):
        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
        embedding_info = {}

        # Check if valid embedding file
        if "string_to_token" and "string_to_param" in embedding_ckpt:
            # Catch variants that do not have the expected keys or values.
            try:
                embedding_info["name"] = embedding_ckpt["name"] or os.path.basename(
                    os.path.splitext(embedding_file)[0]
                )

                # Check num of embeddings and warn user only the first will be used
                embedding_info["num_of_embeddings"] = len(
                    embedding_ckpt["string_to_token"]
                )
                if embedding_info["num_of_embeddings"] > 1:
                    print(">> More than 1 embedding found. Will use the first one")

                embedding = list(embedding_ckpt["string_to_param"].values())[0]
            except (AttributeError, KeyError):
                return self._handle_broken_pt_variants(embedding_ckpt, embedding_file)

            embedding_info["embedding"] = embedding
            embedding_info["num_vectors_per_token"] = embedding.size()[0]
            embedding_info["token_dim"] = embedding.size()[1]

            try:
                embedding_info["trained_steps"] = embedding_ckpt["step"]
                embedding_info["trained_model_name"] = embedding_ckpt[
                    "sd_checkpoint_name"
                ]
                embedding_info["trained_model_checksum"] = embedding_ckpt[
                    "sd_checkpoint"
                ]
            except AttributeError:
                print(">> No Training Details Found. Passing ...")

        # .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/
        # They are actually .bin files
        elif len(embedding_ckpt.keys()) == 1:
            embedding_info = self._parse_embedding_bin(embedding_file)

        # try to figure out what kind of embedding file it is and parse accordingly
        keys = list(ckpt.keys())
        if all(x in keys for x in ['string_to_token','string_to_param','name','step']):
            return self._parse_embedding_v1(ckpt, embedding_file)  # example rem_rezero.pt
        elif all(x in keys for x in ['string_to_token','string_to_param']):
            return self._parse_embedding_v2(ckpt, embedding_file)  # example midj-strong.pt
        elif 'emb_params' in keys:
            return self._parse_embedding_v3(ckpt, embedding_file)  # example easynegative.safetensors
        else:
            print(">> Invalid embedding format")
            embedding_info = None
            return self._parse_embedding_v4(ckpt, embedding_file)  # usually a '.bin' file

    def _parse_embedding_v1(self, embedding_ckpt: dict, file_path: str):
        basename = Path(file_path).stem
        print(f' | Loading v1 embedding file: {basename}')

        embedding_info = {}
        embedding_info["name"] = embedding_ckpt["name"]

        # Check num of embeddings and warn user only the first will be used
        embedding_info["num_of_embeddings"] = len(
            embedding_ckpt["string_to_token"]
        )
        if embedding_info["num_of_embeddings"] > 1:
            print(" | More than 1 embedding found. Will use the first one")
        embedding = list(embedding_ckpt["string_to_param"].values())[0]
        embedding_info["embedding"] = embedding
        embedding_info["num_vectors_per_token"] = embedding.size()[0]
        embedding_info["token_dim"] = embedding.size()[1]
        embedding_info["trained_steps"] = embedding_ckpt["step"]
        embedding_info["trained_model_name"] = embedding_ckpt[
            "sd_checkpoint_name"
        ]
        embedding_info["trained_model_checksum"] = embedding_ckpt[
            "sd_checkpoint"
        ]
        return embedding_info

    def _parse_embedding_bin(self, embedding_file):
        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
        embedding_info = {}

        if list(embedding_ckpt.keys()) == 0:
            print(">> Invalid concepts file")
            embedding_info = None
        else:
            for token in list(embedding_ckpt.keys()):
                embedding_info["name"] = (
                    token
                    or f"<{os.path.basename(os.path.splitext(embedding_file)[0])}>"
                )
                embedding_info["embedding"] = embedding_ckpt[token]
                embedding_info[
                    "num_vectors_per_token"
                ] = 1  # All Concepts seem to default to 1
                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]

        return embedding_info

    def _handle_broken_pt_variants(
        self, embedding_ckpt: dict, embedding_file: str
    def _parse_embedding_v2 (
        self, embedding_ckpt: dict, file_path: str
    ) -> dict:
        """
        This handles the broken .pt file variants. We only know of one at present.
        This handles embedding .pt file variant #2.
        """
        basename = Path(file_path).stem
        print(f' | Loading v2 embedding file: {basename}')
        embedding_info = {}
        if isinstance(
            list(embedding_ckpt["string_to_token"].values())[0], torch.Tensor
@@ -403,7 +368,7 @@ class TextualInversionManager(BaseTextualInversionManager):
            embedding_info["name"] = (
                token
                if token != "*"
                else f"<{os.path.basename(os.path.splitext(embedding_file)[0])}>"
                else f"<{basename}>"
            )
            embedding_info["embedding"] = embedding_ckpt[
                "string_to_param"
@@ -413,7 +378,46 @@ class TextualInversionManager(BaseTextualInversionManager):
            ].shape[0]
            embedding_info["token_dim"] = embedding_info["embedding"].size()[1]
        else:
            print(">> Invalid embedding format")
            print(f" ** {basename}: Unrecognized embedding format")
            embedding_info = None

        return embedding_info

    def _parse_embedding_v3(self, embedding_ckpt: dict, file_path: str):
        """
        Parse 'version 3' of the .pt textual inversion embedding files.
        """
        basename = Path(file_path).stem
        print(f' | Loading v3 embedding file: {basename}')
        embedding_info = {}
        embedding_info["name"] = f'<{basename}>'
        embedding_info["num_of_embeddings"] = 1
        embedding = embedding_ckpt['emb_params']
        embedding_info["embedding"] = embedding
        embedding_info["num_vectors_per_token"] = embedding.size()[0]
        embedding_info["token_dim"] = embedding.size()[1]
        return embedding_info

    def _parse_embedding_v4(self, embedding_ckpt: dict, filepath: str):
        """
        Parse 'version 4' of the textual inversion embedding files. This one
        is usually associated with .bin files trained by HuggingFace diffusers.
        """
        basename = Path(filepath).stem
        short_path = Path(filepath).parents[0].name+'/'+Path(filepath).name

        print(f' | Loading v4 embedding file: {short_path}')
        embedding_info = {}
        if list(embedding_ckpt.keys()) == 0:
            print(f" ** Invalid embeddings file: {short_path}")
            embedding_info = None
        else:
            for token in list(embedding_ckpt.keys()):
                embedding_info["name"] = (
                    token
                    or f"<{basename}>"
                )
                embedding_info["embedding"] = embedding_ckpt[token]
                embedding_info["num_vectors_per_token"] = 1  # All Concepts seem to default to 1
                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]
        return embedding_info
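The reworked parser dispatches on the checkpoint's keys rather than the file extension. A standalone illustration of that dispatch, derived only from the branches shown above (the real code lives on TextualInversionManager and scans and loads the file first; the helper name and sample dicts here are illustrative):

# Illustration of the key-based dispatch in the new _parse_embedding(); not project code.
import torch

def classify_embedding(ckpt: dict) -> str:
    keys = list(ckpt.keys())
    if all(x in keys for x in ['string_to_token', 'string_to_param', 'name', 'step']):
        return "v1"   # e.g. rem_rezero.pt
    elif all(x in keys for x in ['string_to_token', 'string_to_param']):
        return "v2"   # e.g. midj-strong.pt
    elif 'emb_params' in keys:
        return "v3"   # e.g. easynegative.safetensors
    return "v4"       # usually a diffusers-style '.bin' with a single token key

print(classify_embedding({'emb_params': torch.zeros(1, 768)}))   # -> v3
print(classify_embedding({'<token>': torch.zeros(768)}))         # -> v4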
@@ -329,7 +329,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
        resp = requests.get(url, headers=header, stream=True)  # new request with range

    if exist_size > content_length:
        print('* corrupt existing file found. re-downloading')
        print(f'* corrupt existing file found (existing_size={exist_size}, content_length={content_length}). re-downloading')
        os.remove(dest)
        exist_size = 0
@@ -341,15 +341,12 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
    elif resp.status_code == 206 or exist_size > 0:
        print(f"* {dest}: partial file found. Resuming...")
    elif resp.status_code != 200:
        print(f"** An error occurred during downloading {dest}: {resp.reason}")
        print(f"** An error occurred while downloading {url}: {resp.reason}")
        return None
    else:
        print(f"* {dest}: Downloading...")

    try:
        if content_length < 2000:
            print(f"*** ERROR DOWNLOADING {url}: {resp.text}")
            return None

        with open(dest, open_mode) as file, tqdm(
            desc=str(dest),
            initial=exist_size,
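The resume behaviour touched by these hunks follows the usual HTTP Range pattern: request the missing byte range when a partial file exists, and restart when the local file is larger than the server's reported length. A compact sketch of that pattern, assuming nothing beyond what the hunks show (the helper name is illustrative and the access-token header is omitted):

# Sketch of the partial-download/resume decision; not the full download_with_resume() helper.
import os

def resume_state(dest: str, content_length: int):
    exist_size = os.path.getsize(dest) if os.path.exists(dest) else 0
    if exist_size > content_length:          # corrupt partial file: start over
        os.remove(dest)
        exist_size = 0
    header = {"Range": f"bytes={exist_size}-"} if exist_size > 0 else {}
    open_mode = "ab" if exist_size > 0 else "wb"
    return exist_size, header, open_mode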
Some files were not shown because too many files have changed in this diff.