mirror of https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-17 00:48:03 -05:00

Compare commits (70 commits, SHA1):

fd74f51384, 1e5a44a474, 78ea5d773d, 7547784e98, e82641d5f9, 993baadc22,
ccfb0b94b9, 352805d607, 4145e27ce6, 3d4f4b677f, 249173faf5, 794ef868af,
a1ed22517f, 3765ee9b59, 46e578e1ef, 3a8ef0a00c, cf262dd2ea, b0b0c48d8a,
8404e06d77, a91d01c27a, 5eeca47887, 66b361294b, 0fb1e79a0b, 14f1efaf4f,
23aa17e387, f23cc54e1b, e3d992d5d7, bb972b2e3d, 41a8fdea53, a78ff86e42,
8e2fd4c96a, 2f424f29a0, 90f00db032, 77a63e5310, 8f921741a5, 071df30597,
589a817952, dcb21c0f46, 1cb88960fe, 610a1483b7, b4e7fc0d1d, b792b7d68c,
abaa91195d, 1806bfb755, 7377855c02, 5f2a6f24cf, 5b8b92d957, 352202a7bc,
82144de85f, b70d713e89, e39dde4140, c151541703, 29b348ece1, 9f7c86c33e,
a79d40519c, 4515d52a42, 2a8513eee0, b856fac713, 4a3951681c, ba89444e36,
a044403ac3, 16dea46b79, 1f80b5335b, eee7f13771, 6db509a4ff, b7965e1ee6,
c3d292e8f9, 206593ec99, 1b62c781d7, c4de509983
@@ -145,7 +145,7 @@ not supported.

_For Linux with an AMD GPU:_

```sh
-pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```

_For Macintoshes, either Intel or M1/M2:_

Binary file not shown.
@@ -1,164 +0,0 @@
@echo off

@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git, this step will be skipped.

@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.

@rem This enables a user to install this project without manually installing git or Python

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
    set "no_cache_dir="
)

echo ***** Installing InvokeAI.. *****

@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

set PACKAGES_TO_INSTALL=

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

@rem Cleanup
del /q .tmp1 .tmp2

@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
    @rem download micromamba
    echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****

    call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe

    @rem test the mamba binary
    echo ***** Micromamba version: *****
    call micromamba.exe --version

    @rem create the installer env
    if not exist "%INSTALL_ENV_DIR%" (
        call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
    )

    echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****

    call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

    if not exist "%INSTALL_ENV_DIR%" (
        echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
        pause
        exit /b
    )
)

del /q micromamba.exe

@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%

@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

del /q InvokeAI.tgz

set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..

@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix

echo ***** Unpacked InvokeAI source *****

@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit

set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit

del /q python.tgz

echo ***** Unpacked python-build-standalone *****

@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
if %errorlevel% neq 0 goto err_exit
call .venv\Scripts\activate.bat

echo ***** Created Python virtual environment *****

@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit

set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
if %errorlevel% neq 0 goto err_exit

echo ***** Updated pip and wheel *****

set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit

set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit

echo ***** Installed Python dependencies *****

set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit

copy binary_installer\invoke.bat.in .\invoke.bat
echo ***** Installed invoke launcher script *****

@rem more cleanup
rd /s /q binary_installer installer_files

@rem preload the models
set err_msg=----- model download clone failed -----
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
if %errorlevel% neq 0 goto err_exit
deactivate

echo ***** Finished downloading models *****

echo All done! Execute the file invoke.bat in this directory to start InvokeAI
pause
exit

:err_exit
echo %err_msg%
pause
exit
@@ -1,235 +0,0 @@
#!/usr/bin/env bash

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

set -euo pipefail
IFS=$'\n\t'

function _err_exit {
    if test "$1" -ne 0
    then
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}

# This script will install git (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git, this step will be skipped.

# Next, it'll download the project's source code.
# Then it will download a self-contained, standalone Python and unpack it.
# Finally, it'll create the Python virtual environment and preload the models.

# This enables a user to install this project without manually installing git or Python

echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"

export no_cache_dir="--no-cache-dir"
if [ $# -ge 1 ]; then
    if [ "$1" = "use-cache" ]; then
        export no_cache_dir=""
    fi
fi

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*) OS_NAME="linux";;
    Darwin*) OS_NAME="darwin";;
    *) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) ;;
    arm64*) ;;
    *) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
esac

# https://mamba.readthedocs.io/en/latest/installation.html
MAMBA_OS_NAME=$OS_NAME
MAMBA_ARCH=$OS_ARCH
if [ "$OS_NAME" == "darwin" ]; then
    MAMBA_OS_NAME="osx"
fi

if [ "$OS_ARCH" == "linux" ]; then
    MAMBA_ARCH="aarch64"
fi

if [ "$OS_ARCH" == "x86_64" ]; then
    MAMBA_ARCH="64"
fi

PY_ARCH=$OS_ARCH
if [ "$OS_ARCH" == "arm64" ]; then
    PY_ARCH="aarch64"
fi

# Compute device ('cd' segment of reqs files) detect goes here
# This needs a ton of work
# Suggestions:
# - lspci
# - check $PATH for nvidia-smi, get CUDA/GPU version from output
# - Surely there's a similar utility for AMD?
CD="cuda"
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
    CD="mps"
fi

# config
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
if [ "$OS_NAME" == "darwin" ]; then
    PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
elif [ "$OS_NAME" == "linux" ]; then
    PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
fi
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"

PACKAGES_TO_INSTALL=""

if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi

# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
    # download micromamba
    echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"

    curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba

    chmod u+x ./micromamba

    # test the mamba binary
    echo -e "\n***** Micromamba version: *****\n"
    ./micromamba --version

    # create the installer env
    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        ./micromamba create -y --prefix "$INSTALL_ENV_DIR"
    fi

    echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"

    ./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"

    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
        exit
    fi
fi

rm -f micromamba

export PATH="$INSTALL_ENV_DIR/bin:$PATH"

# Download/unpack/clean up InvokeAI release sourceball
_err_msg="\n----- InvokeAI source download failed -----\n"
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? "$_err_msg"
_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? "$_err_msg"

rm -f InvokeAI.tgz

_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? "$_err_msg"
cd ..

# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix

echo -e "\n***** Unpacked InvokeAI source *****\n"

# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? "$_err_msg"
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? "$_err_msg"

rm -f python.tgz

echo -e "\n***** Unpacked python-build-standalone *****\n"

# create venv
_err_msg="\n----- problem creating venv -----\n"

if [ "$OS_NAME" == "darwin" ]; then
    # patch sysconfig so that extensions can build properly
    # adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
    PYTHON_INSTALL_DIR="$(pwd)/python"
    SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
    TMPFILE="$(mktemp)"
    chmod +w "${SYSCONFIG}"
    cp "${SYSCONFIG}" "${TMPFILE}"
    sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
    rm -f "${TMPFILE}"
fi

./python/bin/python3 -E -s -m venv .venv
_err_exit $? "$_err_msg"
source .venv/bin/activate

echo -e "\n***** Created Python virtual environment *****\n"

# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? "$_err_msg"

_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
_err_exit $? "$_err_msg"

echo -e "\n***** Updated pip *****\n"

_err_msg="\n----- requirements file copy failed -----\n"
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? "$_err_msg"

_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
_err_exit $? "$_err_msg"

echo -e "\n***** Installed Python dependencies *****\n"

_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
_err_exit $? "$_err_msg"

echo -e "\n***** Installed InvokeAI *****\n"

cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script *****\n"

# more cleanup
rm -rf binary_installer/ installer_files/

# preload the models
_err_msg="\n----- model download clone failed -----\n"
.venv/bin/python3 scripts/configure_invokeai.py
_err_exit $? "$_err_msg"
deactivate

echo -e "\n***** Finished downloading models *****\n"

echo "All done! Run the command"
echo " $scriptdir/invoke.sh"
echo "to start InvokeAI."
read -p "Press any key to exit..."
exit
@@ -1,36 +0,0 @@
@echo off

PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) else (
    echo Invalid selection
    pause
    exit /b
)

deactivate
@@ -1,46 +0,0 @@
#!/usr/bin/env sh

set -eu

. .venv/bin/activate

# set required env var for torch on mac MPS
if [ "$(uname -s)" = "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice

case $choice in
    1)
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py $*;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web $*;
        ;;
    3)
        printf "\nDeveloper Console:\n";
        printf "Python command is:\n\t";
        which python;
        printf "Python version is:\n\t";
        python --version;
        echo "*************************"
        echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
        echo "so that you can troubleshoot this InvokeAI installation as necessary.";
        printf "*************************\n"
        echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
        /usr/bin/env "$SHELL";
        ;;
    *)
        echo "Invalid selection";
        exit
        ;;
esac
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,17 +0,0 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
@@ -1,33 +0,0 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.15
albumentations
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torch==1.12.1 ; platform_system == 'Darwin'
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
@@ -1,5 +1,5 @@
---
-title: Concepts Library
+title: Styles and Subjects
---

# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
@@ -109,21 +109,43 @@ For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can use subdirectories to keep them distinct.

At startup time, InvokeAI will scan the `embeddings` directory and load any TI
-files it finds there. At startup you will see a message similar to this one:
+files it finds there. At startup you will see messages similar to these:

```bash
->> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
+>> Loading embeddings from /data/lstein/invokeai-2.3/embeddings
+   | Loading v1 embedding file: style-hamunaptra
+   | Loading v4 embedding file: embeddings/learned_embeds-steps-500.bin
+   | Loading v2 embedding file: lfa
+   | Loading v3 embedding file: easynegative
+   | Loading v1 embedding file: rem_rezero
+   | Loading v2 embedding file: midj-strong
+   | Loading v4 embedding file: anime-background-style-v2/learned_embeds.bin
+   | Loading v4 embedding file: kamon-style/learned_embeds.bin
+   ** Notice: kamon-style/learned_embeds.bin was trained on a model with an incompatible token dimension: 768 vs 1024.
+>> Textual inversion triggers: <anime-background-style-v2>, <easynegative>, <lfa>, <midj-strong>, <milo>, Rem3-2600, Style-Hamunaptra
```

-Note the `*` trigger term. This is a placeholder term that many early TI
-tutorials taught people to use rather than a more descriptive term.
-Unfortunately, if you have multiple TI files that all use this term, only the
-first one loaded will be triggered by use of the term.
+Textual Inversion embeddings trained on version 1.X stable diffusion
+models are incompatible with version 2.X models and vice-versa.

-To avoid this problem, you can use the `merge_embeddings.py` script to merge two
-or more TI files together. If it encounters a collision of terms, the script
-will prompt you to select new terms that do not collide. See
-[Textual Inversion](TEXTUAL_INVERSION.md) for details.
+After the embeddings load, InvokeAI will print out a list of all the
+recognized trigger terms. To trigger the term, include it in the
+prompt exactly as written, including angle brackets if any and
+respecting the capitalization.
+
+There are at least four different embedding file formats, and each uses
+a different convention for the trigger terms. In some cases, the
+trigger term is specified in the file contents and may or may not be
+surrounded by angle brackets. In the example above, `Rem3-2600`,
+`Style-Hamunaptra`, and `<midj-strong>` were specified this way and
+there is no easy way to change the term.
+
+In other cases the trigger term is not contained within the embedding
+file. In this case, InvokeAI constructs a trigger term consisting of
+the base name of the file (without the file extension) surrounded by
+angle brackets. In the example above `<easynegative>` is such a file
+(the filename was `easynegative.safetensors`). In such cases, you can
+change the trigger term simply by renaming the file.
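
Because the trigger comes from the filename alone in this case, renaming the file is all it takes to choose a new term. A minimal sketch using the `easynegative` example above (the new name is hypothetical):

```bash
# the basename (minus extension) becomes the trigger,
# so this changes the trigger term from <easynegative> to <easy-neg>
mv embeddings/easynegative.safetensors embeddings/easy-neg.safetensors
```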

## Further Reading

@@ -154,8 +154,11 @@ training sets will converge with 2000-3000 steps.

This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
-quickly, but use more memory. The default size will run with GPUs with
-as little as 12 GB.
+quickly, but use more memory. The default size is selected based on
+whether you have the `xformers` memory-efficient attention library
+installed. If `xformers` is available, the batch size will be 8,
+otherwise 3. These values were chosen to allow training to run with
+GPUs with as little as 12 GB VRAM.
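
If you are unsure whether `xformers` is present, a quick check from the developer's console will tell you. This is a sketch that assumes the standard `.venv` layout created by the installer:

```bash
# succeeds silently if xformers is importable in InvokeAI's virtual environment
if .venv/bin/python -c "import xformers" 2>/dev/null; then
    echo "xformers found: training defaults to batch size 8"
else
    echo "xformers not found: training defaults to batch size 3"
fi
```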

### Learning rate

@@ -172,8 +175,10 @@ learning rate to improve performance.

### Use xformers acceleration

-This will activate XFormers memory-efficient attention. You need to
-have XFormers installed for this to have an effect.
+This will activate XFormers memory-efficient attention, which will
+reduce memory requirements by half or more and allow you to select a
+higher batch size. You need to have XFormers installed for this to
+have an effect.

### Learning rate scheduler

@@ -250,6 +255,49 @@ invokeai-ti \
  --only_save_embeds
```

## Using Distributed Training

If you have multiple GPUs on one machine, or a cluster of GPU-enabled
machines, you can activate distributed training. See the [HuggingFace
Accelerate pages](https://huggingface.co/docs/accelerate/index) for
full information, but the basic recipe is:

1. Enter the InvokeAI developer's console command line by selecting
   option [8] from the `invoke.sh`/`invoke.bat` script.

2. Configure Accelerate using `accelerate config`:

   ```sh
   accelerate config
   ```

   This will guide you through the configuration process, including
   specifying how many machines you will run training on and the number
   of GPUs per machine.

   You only need to do this once.

3. Launch training from the command line using `accelerate launch`. Be sure
   that your current working directory is the InvokeAI root directory (usually
   named `invokeai` in your home directory):

   ```sh
   accelerate launch .venv/bin/invokeai-ti \
       --model=stable-diffusion-1.5 \
       --resolution=512 \
       --learnable_property=object \
       --initializer_token='*' \
       --placeholder_token='<shraddha>' \
       --train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
       --output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
       --scale_lr \
       --train_batch_size=10 \
       --gradient_accumulation_steps=4 \
       --max_train_steps=2000 \
       --learning_rate=0.0005 \
       --lr_scheduler=constant \
       --mixed_precision=fp16 \
       --only_save_embeds
   ```

## Using Embeddings

After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
@@ -20,6 +20,8 @@ title: Overview

Scriptable access to InvokeAI's features.

+- [Visual Manual for InvokeAI](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
+
- Image Generation

- [Prompt Engineering](PROMPTS.md)

docs/index.md (154 lines changed)
@@ -142,6 +142,10 @@ This method is recommended for those familiar with running Docker containers

- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
+- [Visual Manual for InvokeAI v2.3.1](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
+
<!-- separator -->

<!-- separator -->

### The InvokeAI Command Line Interface

@@ -165,7 +169,7 @@ This method is recommended for those familiar with running Docker containers

- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
-- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
+- [Adding custom styles and subjects via embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
<!-- separator -->
@@ -177,6 +181,154 @@ This method is recommended for those familiar with running Docker containers

## :octicons-log-16: Latest Changes

### v2.3.3 <small>(29 March 2023)</small>

#### Bug Fixes

1. When using legacy checkpoints with an external VAE, the VAE file is now scanned for malware prior to loading. Previously only the main model weights file was scanned.
2. Textual inversion will select an appropriate batch size based on whether `xformers` is active, and will default to `xformers` enabled if the library is detected.
3. The batch script log file names have been fixed to be compatible with Windows.
4. Occasional corruption of the `.next_prefix` file (which stores the next output file name in sequence) on Windows systems is now detected and corrected.
5. An infinite loop when opening the developer's console from within the `invoke.sh` script has been corrected.

#### Enhancements

1. It is now possible to load and run several community-contributed SD-2.0 based models, including the infamous "Illuminati" model.
2. The "NegativePrompts" embedding file, and others like it, can now be loaded by placing it in the InvokeAI `embeddings` directory.
3. If no `--model` is specified at launch time, InvokeAI will remember the last model used and restore it the next time it is launched.
4. On Linux systems, the `invoke.sh` launcher now uses a prettier console-based interface. To take advantage of it, install the `dialog` package using your package manager (e.g. `sudo apt install dialog`).
5. When loading legacy models (safetensors/ckpt) you can specify a custom config file and/or a VAE by placing like-named files in the same directory as the model following this example:

```
my-favorite-model.ckpt
my-favorite-model.yaml
my-favorite-model.vae.pt      # or my-favorite-model.vae.safetensors
```

### v2.3.2 <small>(13 March 2023)</small>

#### Bugfixes

Since version 2.3.1 the following bugs have been fixed:

1. Black images appearing for potential NSFW images when generating with legacy checkpoint models and both `--no-nsfw_checker` and `--ckpt_convert` turned on.
2. Black images appearing when generating from models fine-tuned on Stable-Diffusion-2-1-base. When importing V2-derived models, you may be asked to select whether the model was derived from a "base" model (512 pixels) or the 768-pixel SD-2.1 model.
3. The "Use All" button was not restoring the Hi-Res Fix setting on the WebUI.
4. When using the model installer console app, models failed to import correctly when importing from directories with spaces in their names. A similar issue with the output directory was also fixed.
5. Crashes that occurred during model merging.
6. Restore previous naming of Stable Diffusion base and 768 models.
7. Upgraded to latest versions of `diffusers`, `transformers`, `safetensors` and `accelerate` libraries upstream. We hope that this will fix the `assertion NDArray > 2**32` issue that MacOS users have had when generating images larger than 768x768 pixels. Please report back.

As part of the upgrade to `diffusers`, the location of the diffusers-based models has changed from `models/diffusers` to `models/hub`. When you launch InvokeAI for the first time, it will prompt you to OK a one-time move. This should be quick and harmless, but if you have modified your `models/diffusers` directory in some way, for example using symlinks, you may wish to cancel the migration and make appropriate adjustments.

#### New "invokeai-batch" script

2.3.2 introduces a new command-line only script called `invokeai-batch` that can be used to generate hundreds of images from prompts and settings that vary systematically. This can be used to try the same prompt across multiple combinations of models, steps, CFG settings and so forth. It also allows you to template prompts and generate a combinatorial list like:

```
a shack in the mountains, photograph
a shack in the mountains, watercolor
a shack in the mountains, oil painting
a chalet in the mountains, photograph
a chalet in the mountains, watercolor
a chalet in the mountains, oil painting
a shack in the desert, photograph
...
```

If you have a system with multiple GPUs, or a single GPU with lots of VRAM, you can parallelize generation across the combinatorial set, reducing wait times and using your system's resources efficiently (make sure you have good GPU cooling).

To try `invokeai-batch` out, launch the "developer's console" using the `invoke` launcher script, or activate the invokeai virtual environment manually. From the console, give the command `invokeai-batch --help` in order to learn how the script works and create your first template file for dynamic prompt generation.

### v2.3.1 <small>(26 February 2023)</small>

This is primarily a bugfix release, but it does provide several new features that will improve the user experience.

#### Enhanced support for model management

InvokeAI now makes it convenient to add, remove and modify models. You can individually import models that are stored on your local system, scan an entire folder and its subfolders for models and import them automatically, and even directly import models from the internet by providing their download URLs. You also have the option of designating a local folder to scan for new models each time InvokeAI is restarted.

There are three ways of accessing the model management features:

1. ***From the WebUI***, click on the cube to the right of the model selection menu. This will bring up a form that allows you to import models individually from your local disk or scan a directory for models to import.



2. **Using the Model Installer App**

Choose option (5) _download and install models_ from the `invoke` launcher script to start a new console-based application for model management. You can use this to select from a curated set of starter models, or import checkpoint, safetensors, and diffusers models from a local disk or the internet. The example below shows importing two checkpoint URLs from popular SD sites and a HuggingFace diffusers model using its Repository ID. It also shows how to designate a folder to be scanned at startup time for new models to import.

Command-line users can start this app using the command `invokeai-model-install`.



3. **Using the Command Line Client (CLI)**

The `!install_model` and `!convert_model` commands have been enhanced to allow entering of URLs and local directories to scan and import. The first command installs .ckpt and .safetensors files as-is. The second one converts them into the faster diffusers format before installation.

Internally InvokeAI is able to probe the contents of a .ckpt or .safetensors file to distinguish among v1.x, v2.x and inpainting models. This means that you do **not** need to include "inpaint" in your model names to use an inpainting model. Note that Stable Diffusion v2.x models will be autoconverted into a diffusers model the first time you use it.

Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management.

#### An Improved Installer Experience

The installer now launches a console-based UI for setting and changing commonly-used startup options:



After selecting the desired options, the installer installs several support models needed by InvokeAI's face reconstruction and upscaling features and then launches the interface for selecting and installing models shown earlier. At any time, you can edit the startup options by launching `invoke.sh`/`invoke.bat` and entering option (6) _change InvokeAI startup options_.

Command-line users can launch the new configure app using `invokeai-configure`.

This release also comes with a renewed updater. To do an update without going through a whole reinstallation, launch `invoke.sh` or `invoke.bat` and choose option (9) _update InvokeAI_. This will bring you to a screen that prompts you to update to the latest released version, to the most current development version, or any released or unreleased version you choose by selecting the tag or branch of the desired version.



Command-line users can run this interface by typing `invokeai-configure`.

#### Image Symmetry Options

There are now features to generate horizontal and vertical symmetry during generation. The way these work is to wait until a selected step in the generation process and then to turn on a mirror image effect. In addition to generating some cool images, you can also use this to make side-by-side comparisons of how an image will look with more or fewer steps. Access this option from the WebUI by selecting _Symmetry_ from the image generation settings, or within the CLI by using the options `--h_symmetry_time_pct` and `--v_symmetry_time_pct` (these can be abbreviated to `--h_sym` and `--v_sym` like all other options).



#### A New Unified Canvas Look

This release introduces a beta version of the WebUI Unified Canvas. To try it out, open up the settings dialogue in the WebUI (gear icon) and select _Use Canvas Beta Layout_:



Refresh the screen and go to the Unified Canvas (left side of screen, third icon from the top). The new layout is designed to provide more space to work in and to keep the image controls close to the image itself:



#### Model conversion and merging within the WebUI

The WebUI now has an intuitive interface for model merging, as well as for permanent conversion of models from legacy .ckpt/.safetensors formats into diffusers format. These options are also available directly from the `invoke.sh`/`invoke.bat` scripts.

#### An easier way to contribute translations to the WebUI

We have migrated our translation efforts to [Weblate](https://hosted.weblate.org/engage/invokeai/), a FOSS translation product. Maintaining the growing project's translations is now far simpler for the maintainers and community. Please review our brief [translation guide](https://github.com/invoke-ai/InvokeAI/blob/v2.3.1/docs/other/TRANSLATION.md) for more information on how to contribute.

#### Numerous internal bugfixes and performance issues

This release quashes multiple bugs that were reported in 2.3.0. Major internal changes include upgrading to `diffusers 0.13.0`, and using the `compel` library for prompt parsing. See [Detailed Change Log](#full-change-log) for a detailed list of bugs caught and squished.

#### Summary of InvokeAI command line scripts (all accessible via the launcher menu)

| Command                  | Description                                                         |
|--------------------------|---------------------------------------------------------------------|
| `invokeai`               | Command line interface                                              |
| `invokeai --web`         | Web interface                                                       |
| `invokeai-model-install` | Model installer with console forms-based front end                  |
| `invokeai-ti --gui`      | Textual inversion, with a console forms-based front end             |
| `invokeai-merge --gui`   | Model merging, with a console forms-based front end                 |
| `invokeai-configure`     | Startup configuration; can also be used to reinstall support models |
| `invokeai-update`        | InvokeAI software updater                                           |

### v2.3.0 <small>(9 February 2023)</small>

#### Migration to Stable Diffusion `diffusers` models

@@ -417,7 +417,7 @@ Then type the following commands:

=== "AMD System"
    ```bash
-    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
+    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
    ```

### Corrupted configuration file

@@ -77,7 +77,7 @@ machine. To test, open up a terminal window and issue the following
command:

```
-rocm-smi
+rocminfo
```

If you get a table labeled "ROCm System Management Interface" the

@@ -95,9 +95,17 @@ recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.

-After installation, please run `rocm-smi` a second time to confirm
+After installation, please run `rocminfo` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
-do a reboot in order to load the driver.
+do a reboot in order to load the driver. In addition, if you see
+errors relating to your username not being a member of the `render`
+group, you may fix this by adding yourself to this group with the command:
+
+```
+sudo usermod -a -G render myUserName
+```
+
+(Thanks to @EgoringKosmos for the usermod recipe.)

### Linux Install with a ROCm-docker Container

@@ -110,7 +118,7 @@ recipes are available

When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
-https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
+https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer
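
For reference, the full manual command matches the AMD hunk shown earlier; adjust the ROCm version in the URL if your driver differs:

```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```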

@@ -11,7 +11,7 @@ The model checkpoint files ('\*.ckpt') are the Stable Diffusion
captioned images gathered from multiple sources.

Originally there was only a single Stable Diffusion weights file,
-which many people named `model.ckpt`. Now there are dozens or more
+which many people named `model.ckpt`. Now there are hundreds
that have been fine tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format

@@ -29,9 +29,10 @@ and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.

-While InvokeAI will continue to support `.ckpt` and `.safetensors`
-models for the near future, these are deprecated and support will
-likely be withdrawn at some point in the not-too-distant future.
+While InvokeAI will continue to support legacy `.ckpt` and `.safetensors`
+models for the near future, these are deprecated and support will
+be withdrawn in version 3.0, after which all legacy models will be
+converted into diffusers at the time they are loaded.

This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
@@ -89,15 +90,18 @@ aware that CIVITAI hosts many models that generate NSFW content.

!!! note

    InvokeAI 2.3.x does not support directly importing and
-   running Stable Diffusion version 2 checkpoint models. You may instead
-   convert them into `diffusers` models using the conversion methods
-   described below.
+   running Stable Diffusion version 2 checkpoint models. If you
+   try to import them, they will be automatically
+   converted into `diffusers` models on the fly. This adds about 20s
+   to loading time. To avoid this overhead, you are encouraged to
+   use one of the conversion methods described below to convert them
+   permanently.

## Installation

There are multiple ways to install and manage models:

-1. The `invokeai-configure` script which will download and install them for you.
+1. The `invokeai-model-install` script which will download and install them for you.

2. The command-line tool (CLI) has commands that allow you to import, configure and modify
model files.
@@ -105,14 +109,41 @@ There are multiple ways to install and manage models:

3. The web interface (WebUI) has a GUI for importing and managing
models.

-### Installation via `invokeai-configure`
+### Installation via `invokeai-model-install`

-From the `invoke` launcher, choose option (6) "re-run the configure
-script to download new models." This will launch the same script that
-prompted you to select models at install time. You can use this to add
-models that you skipped the first time around. It is all right to
-specify a model that was previously downloaded; the script will just
-confirm that the files are complete.
+From the `invoke` launcher, choose option (5) "Download and install
+models." This will launch the same script that prompted you to select
+models at install time. You can use this to add models that you
+skipped the first time around. It is all right to specify a model that
+was previously downloaded; the script will just confirm that the files
+are complete.
+
+This script allows you to load third-party models. Look for a large text
+entry box labeled "IMPORT LOCAL AND REMOTE MODELS." In this box, you
+can cut and paste one or more of any of the following:
+
+1. A URL that points to a downloadable .ckpt or .safetensors file.
+2. A file path pointing to a .ckpt or .safetensors file.
+3. A diffusers model repo_id (from HuggingFace) in the format
+   "owner/repo_name".
+4. A directory path pointing to a diffusers model directory.
+5. A directory path pointing to a directory containing a bunch of
+   .ckpt and .safetensors files. All will be imported.
+
+You can enter multiple items into the textbox, each one on a separate
+line. You can paste into the textbox using ctrl-shift-V or by dragging
+and dropping a file/directory from the desktop into the box.
+
+The script also lets you designate a directory that will be scanned
+for new model files each time InvokeAI starts up. These models will be
+added automatically.
+
+Lastly, the script gives you a checkbox option to convert legacy models
+into diffusers, or to run the legacy model directly. If you choose to
+convert, the original .ckpt/.safetensors file will **not** be deleted,
+but a new diffusers directory will be created, using twice your disk
+space. However, the diffusers version will load faster, and will be
+compatible with InvokeAI 3.0.

### Installation via the CLI

@@ -144,19 +175,15 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors

For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.

-When you import a legacy model, the CLI will first ask you what type
-of model this is. You can indicate whether it is a model based on
-Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
-or a 1.x inpainting model. Be careful to indicate the correct model
-type, or it will not load correctly. You can correct the model type
-after the fact using the `!edit_model` command.
-
-The system will then ask you a few other questions about the model,
-including what size image it was trained on (usually 512x512), what
-name and description you wish to use for it, and whether you would
-like to install a custom VAE (variable autoencoder) file for the
-model. For recent models, the answer to the VAE question is usually
-"no," but it won't hurt to answer "yes".
+When you import a legacy model, the CLI will try to figure out what
+type of model it is and select the correct load configuration file.
+However, one thing it can't do is to distinguish between Stable
+Diffusion 2.x models trained on 512x512 vs 768x768 images. In this
+case, the CLI will pop up a menu of choices, asking you to select
+which type of model it is. Please consult the model documentation to
+identify the correct answer, as loading with the wrong configuration
+will lead to black images. You can correct the model type after the
+fact using the `!edit_model` command.

After importing, the model will load. If this is successful, you will
be asked if you want to keep the model loaded in memory to start
@@ -211,109 +238,6 @@ description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".

### Converting legacy models into `diffusers`

The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
models file into `diffusers` and install it. This will enable the model
to load and run faster without loss of image quality.

The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:

```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```

After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.

### Optimizing a previously-installed model

Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:

```bash
invoke> !optimize_model martians-v1.0
```

The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.

### Related CLI Commands

There are a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:

* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model

### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.

You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows this model:

#### A legacy model

A legacy `.ckpt` or `.safetensors` entry will look like this:

```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./path/to/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  format: ckpt
  width: 512
  height: 512
  default: false
```

Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.

#### A diffusers model

A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  default: true
```

And for a downloaded directory:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  path: /path/to/captahab-arabian-nights-1.1
  format: diffusers
  default: true
```

There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.

After you save the modified `models.yaml` file relaunch
`invokeai`. The new model will now be available for your use.

### Installation via the WebUI

To access the WebUI Model Manager, click on the button that looks like
@@ -393,3 +317,143 @@ And here is what the same argument looks like in `invokeai.init`:
|
||||
--no-nsfw_checker
|
||||
--autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
```
|
||||
|
||||
### Specifying a configuration file for legacy checkpoints

Some checkpoint files come with instructions to use a specific `.yaml`
configuration file. For InvokeAI to load this file correctly, please put
the config file in the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Here is an example:

```bash
wonderful-model-v2.ckpt
wonderful-model-v2.yaml
```

This is not needed for `diffusers` models, which come with their own
pre-packaged configuration.

### Specifying a custom VAE file for legacy checkpoints

To associate a custom VAE with a legacy file, place the VAE file in
the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Use the suffix `.vae.pt` or `.vae.ckpt` for VAE
checkpoint files, and `.vae.safetensors` for VAE safetensors
files. There is no requirement that both the model and the VAE follow
the same format.

Example:

```bash
wonderful-model-v2.pt
wonderful-model-v2.vae.safetensors
```

### Converting legacy models into `diffusers`

The CLI command `!convert_model` will convert a `.safetensors` or `.ckpt`
model file into `diffusers` format and install it. This will enable the model
to load and run faster without loss of image quality.

The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:

```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```
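
The URL form works the same way; for instance (the address below is
purely illustrative):

```bash
invoke> !convert_model https://example.com/fine-tunes/martians.safetensors
```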

After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.

### Optimizing a previously-installed model

Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file by using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:

```bash
invoke> !optimize_model martians-v1.0
```

The model will be converted into `diffusers` format and will replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.

Alternatively, you can use the WebUI's model manager to handle diffusers
optimization. Select the legacy model you wish to convert, and then
look for a button labeled "Convert to Diffusers" in the upper right of
the window.

### Related CLI Commands

There is a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:

* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model

### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.

You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows this model:

#### A legacy model

A legacy `.ckpt` or `.safetensors` entry will look like this:

```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./path/to/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  format: ckpt
  width: 512
  height: 512
  default: false
```

Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.

#### A diffusers model

A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  default: true
```

And for a downloaded directory:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  path: /path/to/captahab-arabian-nights-1.1
  format: diffusers
  default: true
```

There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.

After you save the modified `models.yaml` file, relaunch
`invokeai`. The new model will now be available for your use.

@@ -23,14 +23,16 @@ We thank them for all of their time and hard work.

* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Model training and merging
* @gogurtenjoyer - User support and testing
* @whosawwhatsis - User support and testing

## **Contributions by**

- [tildebyte](https://github.com/tildebyte)
- [Sean McLellan](https://github.com/Oceanswave)
- [Kevin Gibbons](https://github.com/bakkot)
- [Tesseract Cat](https://github.com/TesseractCat)

@@ -78,6 +80,7 @@ We thank them for all of their time and hard work.

- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
- [Eugene Brodsky](https://github.com/ebr)
- [Statcomm](https://github.com/statcomm)

## **Original CompVis Authors**

@@ -241,14 +241,18 @@ class InvokeAiInstance:

        from plumbum import FG, local

        # Note that we're installing pinned versions of torch and
        # torchvision here, which *should* correspond to what is
        # in pyproject.toml. This is to prevent torch 2.0 from
        # being installed and immediately uninstalled and replaced with 1.13
        pip = local[self.pip]

        (
            pip[
                "install",
                "--require-virtualenv",
                "torch",
                "torchvision",
                "torch~=1.13.1",
                "torchvision~=0.14.1",
                "--force-reinstall",
                "--find-links" if find_links is not None else None,
                find_links,
@@ -379,6 +383,9 @@ class InvokeAiInstance:
        shutil.copy(src, dest)
        os.chmod(dest, 0o0755)

        if OS == "Linux":
            shutil.copy(Path(__file__).parents[1] / "templates" / "dialogrc", self.runtime / '.dialogrc')

    def update(self):
        pass

installer/templates/dialogrc (new file)
@@ -0,0 +1,27 @@
# Screen
use_shadow = OFF
use_colors = ON
screen_color = (BLACK, BLACK, ON)

# Box
dialog_color = (YELLOW, BLACK, ON)
title_color = (YELLOW, BLACK, ON)
border_color = (YELLOW, BLACK, OFF)
border2_color = (YELLOW, BLACK, OFF)

# Button
button_active_color = (RED, BLACK, OFF)
button_inactive_color = (YELLOW, BLACK, OFF)
button_label_active_color = (YELLOW, BLACK, ON)
button_label_inactive_color = (YELLOW, BLACK, ON)

# Menu box
menubox_color = (BLACK, BLACK, ON)
menubox_border_color = (YELLOW, BLACK, OFF)
menubox_border2_color = (YELLOW, BLACK, OFF)

# Menu window
item_color = (YELLOW, BLACK, OFF)
item_selected_color = (BLACK, YELLOW, OFF)
tag_key_color = (YELLOW, BLACK, OFF)
tag_key_selected_color = (BLACK, YELLOW, OFF)
@@ -1,5 +1,10 @@
#!/bin/bash

# MIT License

# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
# Copyright 2023, The InvokeAI Development Team

####
# This launch script assumes that:
# 1. it is located in the runtime directory,
@@ -11,85 +16,168 @@

set -eu

# ensure we're in the correct folder in case user's CWD is somewhere else
# Ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

. .venv/bin/activate

export INVOKEAI_ROOT="$scriptdir"
PARAMS=$@

# set required env var for torch on mac MPS
# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
tui=true
if command -v dialog &>/dev/null; then
    # This must use $@ to properly loop through the arguments passed by the user
    for arg in "$@"; do
        if [ "$arg" == "--no-tui" ]; then
            tui=false
            # Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
            PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
            break
        fi
    done
else
    tui=false
fi

# Set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

while true
do
    if [ "$0" != "bash" ]; then
        echo "Do you want to generate images using the"
        echo "1. command-line interface"
        echo "2. browser-based UI"
        echo "3. run textual inversion training"
        echo "4. merge models (diffusers type only)"
        echo "5. download and install models"
        echo "6. change InvokeAI startup options"
        echo "7. re-run the configure script to fix a broken install"
        echo "8. open the developer console"
        echo "9. update InvokeAI"
        echo "10. command-line help"
        echo "Q - Quit"
        echo ""
        read -p "Please enter 1-10, Q: [2] " yn
        choice=${yn:='2'}
        case $choice in
            1)
                echo "Starting the InvokeAI command-line..."
                invokeai $@
                ;;
            2)
                echo "Starting the InvokeAI browser-based UI..."
                invokeai --web $@
                ;;
            3)
                echo "Starting Textual Inversion:"
                invokeai-ti --gui $@
                ;;
            4)
                echo "Merging Models:"
                invokeai-merge --gui $@
                ;;
            5)
                invokeai-model-install --root ${INVOKEAI_ROOT}
                ;;
            6)
                invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
                ;;
            7)
                invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
                ;;
            8)
                echo "Developer Console:"
                file_name=$(basename "${BASH_SOURCE[0]}")
                bash --init-file "$file_name"
                ;;
            9)
                echo "Update:"
                invokeai-update
                ;;
            10)
                invokeai --help
                ;;
            [qQ])
                exit 0
                ;;
            *)
                echo "Invalid selection"
                exit;;
# Primary function for the case statement to determine user input
do_choice() {
    case $1 in
        1)
            clear
            printf "Generate images with a browser-based interface\n"
            invokeai --web $PARAMS
            ;;
        2)
            clear
            printf "Generate images using a command-line interface\n"
            invokeai $PARAMS
            ;;
        3)
            clear
            printf "Textual inversion training\n"
            invokeai-ti --gui $PARAMS
            ;;
        4)
            clear
            printf "Merge models (diffusers type only)\n"
            invokeai-merge --gui $PARAMS
            ;;
        5)
            clear
            printf "Download and install models\n"
            invokeai-model-install --root ${INVOKEAI_ROOT}
            ;;
        6)
            clear
            printf "Change InvokeAI startup options\n"
            invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
            ;;
        7)
            clear
            printf "Re-run the configure script to fix a broken install\n"
            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
            ;;
        8)
            clear
            printf "Open the developer console\n"
            file_name=$(basename "${BASH_SOURCE[0]}")
            bash --init-file "$file_name"
            ;;
        9)
            clear
            printf "Update InvokeAI\n"
            invokeai-update
            ;;
        10)
            clear
            printf "Command-line help\n"
            invokeai --help
            ;;
        "HELP 1")
            clear
            printf "Command-line help\n"
            invokeai --help
            ;;
        *)
            clear
            printf "Exiting...\n"
            exit
            ;;
    esac
    clear
}

# Dialog-based TUI for launching Invoke functions
do_dialog() {
    options=(
        1 "Generate images with a browser-based interface"
        2 "Generate images using a command-line interface"
        3 "Textual inversion training"
        4 "Merge models (diffusers type only)"
        5 "Download and install models"
        6 "Change InvokeAI startup options"
        7 "Re-run the configure script to fix a broken install"
        8 "Open the developer console"
        9 "Update InvokeAI")

    choice=$(dialog --clear \
        --backtitle "\Zb\Zu\Z3InvokeAI" \
        --colors \
        --title "What would you like to run?" \
        --ok-label "Run" \
        --cancel-label "Exit" \
        --help-button \
        --help-label "CLI Help" \
        --menu "Select an option:" \
        0 0 0 \
        "${options[@]}" \
        2>&1 >/dev/tty) || clear
    do_choice "$choice"
    clear
}

# Command-line interface for launching Invoke functions
do_line_input() {
    clear
    printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
    printf "Do you want to generate images using the\n"
    printf "1: Browser-based UI\n"
    printf "2: Command-line interface\n"
    printf "3: Run textual inversion training\n"
    printf "4: Merge models (diffusers type only)\n"
    printf "5: Download and install models\n"
    printf "6: Change InvokeAI startup options\n"
    printf "7: Re-run the configure script to fix a broken install\n"
    printf "8: Open the developer console\n"
    printf "9: Update InvokeAI\n"
    printf "10: Command-line help\n"
    printf "Q: Quit\n\n"
    read -p "Please enter 1-10, Q: [1] " yn
    choice=${yn:='1'}
    do_choice $choice
    clear
}

# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
if [ "$0" != "bash" ]; then
    while true; do
        if $tui; then
            # .dialogrc must be located in the same directory as the invoke.sh script
            export DIALOGRC="./.dialogrc"
            do_dialog
        else
            do_line_input
        fi
    done
else # in developer console
    python --version
    echo "Press ^D to exit"
    printf "Press ^D to exit\n"
    export PS1="(InvokeAI) \u@\h \w> "
fi
done

@@ -976,7 +976,7 @@ class Generate:
        self.generators = {}

        seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
        if self.embedding_path is not None:
        if self.embedding_path and not model_data.get("ti_embeddings_loaded"):
            print(f'>> Loading embeddings from {self.embedding_path}')
            for root, _, files in os.walk(self.embedding_path):
                for name in files:
@@ -984,9 +984,10 @@ class Generate:
                    self.model.textual_inversion_manager.load_textual_inversion(
                        ti_path, defer_injecting_tokens=True
                    )
            print(
                f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
            )
            model_data["ti_embeddings_loaded"] = True
            print(
                f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
            )

        self.model_name = model_name
        self._set_sampler()  # requires self.model_name to be set first

@@ -4,6 +4,7 @@ import shlex
import sys
import traceback
from argparse import Namespace
from packaging import version
from pathlib import Path
from typing import Union

@@ -125,11 +126,13 @@ def main():
        print(f"{e}. Aborting.")
        sys.exit(-1)

    model = opt.model or retrieve_last_used_model()

    # creating a Generate object:
    try:
        gen = Generate(
            conf=opt.conf,
            model=opt.model,
            model=model,
            sampler_name=opt.sampler_name,
            embedding_path=embedding_path,
            full_precision=opt.full_precision,
@@ -178,6 +181,7 @@ def main():
    # web server loops forever
    if opt.web or opt.gui:
        invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan)
        save_last_used_model(gen.model_name)
        sys.exit(0)

    if not infile:
@@ -498,6 +502,7 @@ def main_loop(gen, opt, completer):
    print(
        f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'
    )
    save_last_used_model(gen.model_name)


# TO DO: remove repetitive code and the awkward command.replace() trope
@@ -771,14 +776,10 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
        original_config_file = Path(model_info["config"])
        model_name = model_name_or_path
        model_description = model_info["description"]
        vae = model_info["vae"]
        vae_path = model_info.get("vae")
    else:
        print(f"** {model_name_or_path} is not a legacy .ckpt weights file")
        return
    if vae_repo := ldm.invoke.model_manager.VAE_TO_REPO_ID.get(Path(vae).stem):
        vae_repo = dict(repo_id=vae_repo)
    else:
        vae_repo = None
    model_name = manager.convert_and_import(
        ckpt_path,
        diffusers_path=Path(
@@ -787,7 +788,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
        model_name=model_name,
        model_description=model_description,
        original_config_file=original_config_file,
        vae=vae_repo,
        vae_path=vae_path,
    )
    else:
        try:
@@ -833,6 +834,7 @@ def edit_model(model_name: str, gen, opt, completer):
    print(f"\n>> Editing model {model_name} from configuration file {opt.conf}")
    new_name = _get_model_name(manager.list_models(), completer, model_name)

    completer.complete_extensions(('.yaml','.ckpt','.safetensors','.pt'))
    for attribute in info.keys():
        if type(info[attribute]) != str:
            continue
@@ -840,6 +842,7 @@ def edit_model(model_name: str, gen, opt, completer):
            continue
        completer.set_line(info[attribute])
        info[attribute] = input(f"{attribute}: ") or info[attribute]
    completer.complete_extensions(None)

    if info["format"] == "diffusers":
        vae = info.get("vae", dict(repo_id=None, path=None, subfolder=None))
@@ -1286,10 +1289,48 @@ def check_internet() -> bool:
    except:
        return False


def retrieve_last_used_model() -> str:
    """
    Return name of the last model used.
    """
    model_file_path = Path(Globals.root, '.last_model')
    if not model_file_path.exists():
        return None
    with open(model_file_path, 'r') as f:
        return f.readline()

def save_last_used_model(model_name: str):
    """
    Save name of the last model used.
    """
    model_file_path = Path(Globals.root, '.last_model')
    with open(model_file_path, 'w') as f:
        f.write(model_name)

# This routine performs any patch-ups needed after installation
def run_patches():
    # install ckpt configuration files that may have been added to the
    # distro after original root directory configuration
    install_missing_config_files()
    version_file = Path(Globals.root, '.version')
    if version_file.exists():
        with open(version_file, 'r') as f:
            root_version = version.parse(f.readline() or 'v2.3.2')
    else:
        root_version = version.parse('v2.3.2')
    app_version = version.parse(ldm.invoke.__version__)
    if root_version < app_version:
        try:
            do_version_update(root_version, ldm.invoke.__version__)
            with open(version_file, 'w') as f:
                f.write(ldm.invoke.__version__)
        except:
            print("** Update failed. Will try again on next launch")

def install_missing_config_files():
    """
    install ckpt configuration files that may have been added to the
    distro after original root directory configuration
    """
    import invokeai.configs as conf
    from shutil import copyfile

@@ -1300,6 +1341,27 @@ def run_patches():
        if not dest.exists():
            copyfile(src, dest)

def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
    """
    Make any updates to the launcher .sh and .bat scripts that may be needed
    from release to release. This is not an elegant solution. Instead, the
    launcher should be moved into the source tree and installed using pip.
    """
    if root_version < version.Version('v2.3.3'):
        if sys.platform == "linux":
            print('>> Downloading new version of launcher script and its config file')
            from ldm.util import download_with_progress_bar
            url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'

            dest = Path(Globals.root, 'invoke.sh.in')
            assert download_with_progress_bar(url_base + 'invoke.sh.in', dest)
            dest.replace(Path(Globals.root, 'invoke.sh'))
            os.chmod(Path(Globals.root, 'invoke.sh'), 0o0755)

            dest = Path(Globals.root, 'dialogrc')
            assert download_with_progress_bar(url_base + 'dialogrc', dest)
            dest.replace(Path(Globals.root, '.dialogrc'))

if __name__ == '__main__':
    main()

@@ -1,2 +1,2 @@

__version__='2.3.2'
__version__='2.3.3'

File diff suppressed because it is too large
@@ -16,6 +16,8 @@ from rich.text import Text
from ldm.invoke import __version__

INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG="https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH="https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"

OS = platform.uname().system
@@ -41,7 +43,8 @@ def welcome(versions: dict):
    yield '[bold yellow]Options:'
    yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the tag or branch name you wish to update'''
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to'''

    console.rule()
    print(
@@ -62,17 +65,26 @@ def main():
    welcome(versions)

    tag = None
    choice = Prompt.ask('Choice:',choices=['1','2','3'],default='1')
    branch = None
    release = None
    choice = Prompt.ask('Choice:',choices=['1','2','3','4'],default='1')

    if choice=='1':
        tag = versions[0]['tag_name']
        release = versions[0]['tag_name']
    elif choice=='2':
        tag = 'main'
        release = 'main'
    elif choice=='3':
        tag = Prompt.ask('Enter an InvokeAI tag or branch name')
        tag = Prompt.ask('Enter an InvokeAI tag name')
    elif choice=='4':
        branch = Prompt.ask('Enter an InvokeAI branch name')

    print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]')
    cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517'
    print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
    if release:
        cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
    elif tag:
        cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
    else:
        cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
    print('')
    print('')
    if os.system(cmd)==0:

@@ -29,7 +29,13 @@ Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"

# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
Dataset_path = None
for path in configs.__path__:
    file = Path(path, "INITIAL_MODELS.yaml")
    if file.exists():
        Dataset_path = file
        break
assert Dataset_path, f"Could not find the file INITIAL_MODELS.yaml in {configs.__path__}"

# initial models omegaconf
Datasets = None

@@ -157,7 +157,7 @@ def _run_invoke(
):
    pid = os.getpid()
    logdir.mkdir(parents=True, exist_ok=True)
    logfile = Path(logdir, f'{time.strftime("%Y-%m-%d-%H:%M:%S")}-pid={pid}.txt')
    logfile = Path(logdir, f'{time.strftime("%Y-%m-%d_%H-%M-%S")}-pid={pid}.txt')
    print(
        f">> Process {pid} running on GPU {gpu}; logging to {logfile}", file=sys.stderr
    )

ldm/invoke/invokeai_metadata.py (new executable file)
@@ -0,0 +1,29 @@
#!/usr/bin/env python

import sys
import json
from ldm.invoke.pngwriter import retrieve_metadata

def main():
    if len(sys.argv) < 2:
        print("Usage: file2prompt.py <file1.png> <file2.png> <file3.png>...")
        print("This script opens up the indicated invoke.py-generated PNG file(s) and prints out their metadata.")
        exit(-1)

    filenames = sys.argv[1:]
    for f in filenames:
        try:
            metadata = retrieve_metadata(f)
            print(f'{f}:\n', json.dumps(metadata['sd-metadata'], indent=4))
        except FileNotFoundError:
            sys.stderr.write(f'{f} not found\n')
            continue
        except PermissionError:
            sys.stderr.write(f'{f} could not be opened due to inadequate permissions\n')
            continue

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
@@ -19,7 +19,7 @@ import warnings
from enum import Enum
from pathlib import Path
from shutil import move, rmtree
from typing import Any, Optional, Union, Callable
from typing import Any, Callable, Optional, Union, List

import safetensors
import safetensors.torch
@@ -35,12 +35,7 @@ from picklescan.scanner import scan_file_path
from ldm.invoke.devices import CPU_DEVICE
from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
from ldm.invoke.globals import Globals, global_cache_dir
from ldm.util import (
    ask_user,
    download_with_resume,
    instantiate_from_config,
    url_attachment_name,
)
from ldm.util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name


class SDLegacyType(Enum):
@@ -51,12 +46,7 @@ class SDLegacyType(Enum):
    V2_v = 5
    UNKNOWN = 99


DEFAULT_MAX_MODELS = 2
VAE_TO_REPO_ID = {  # hack, see note in convert_and_import()
    "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse",
}


class ModelManager(object):
    def __init__(
@@ -113,11 +103,7 @@ class ModelManager(object):
            requested_model = self.models[model_name]["model"]
            print(f">> Retrieving model {model_name} from system RAM cache")
            self.models[model_name]["model"] = self._model_from_cpu(requested_model)
            width = self.models[model_name]["width"]
            height = self.models[model_name]["height"]
            hash = self.models[model_name]["hash"]

        else:  # we're about to load a new model, so potentially offload the least recently used one
        else:
            requested_model, width, height, hash = self._load_model(model_name)
            self.models[model_name] = {
                "model": requested_model,
@@ -128,13 +114,8 @@ class ModelManager(object):

        self.current_model = model_name
        self._push_newest_model(model_name)
        return {
            "model": requested_model,
            "width": width,
            "height": height,
            "hash": hash,
        }

        return self.models[model_name]

    def default_model(self) -> str | None:
        """
        Returns the name of the default model, or None
@@ -177,9 +158,9 @@ class ModelManager(object):
        """
        # if we are converting legacy files automatically, then
        # there are no legacy ckpts!
        if Globals.ckpt_convert:
            return False
        info = self.model_info(model_name)
        if Globals.ckpt_convert or info.format=='diffusers' or self.is_v2_config(info.config):
            return False
        if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
            return True
        return False
@@ -282,13 +263,13 @@ class ModelManager(object):
            self.stack.remove(model_name)
        if delete_files:
            if weights:
                print(f"** deleting file {weights}")
                print(f"** Deleting file {weights}")
                Path(weights).unlink(missing_ok=True)
            elif path:
                print(f"** deleting directory {path}")
                print(f"** Deleting directory {path}")
                rmtree(path, ignore_errors=True)
            elif repo_id:
                print(f"** deleting the cached model directory for {repo_id}")
                print(f"** Deleting the cached model directory for {repo_id}")
                self._delete_model_from_cache(repo_id)

    def add_model(
@@ -384,25 +365,43 @@ class ModelManager(object):
        if not os.path.isabs(weights):
            weights = os.path.normpath(os.path.join(Globals.root, weights))

        # check whether this is a v2 file and force conversion
        convert = Globals.ckpt_convert or self.is_v2_config(config)

        if matching_config := self._scan_for_matching_file(Path(weights), suffixes=['.yaml']):
            print(f' | Using external config file {matching_config}')
            config = matching_config

        # get the path to the custom vae, if any
        vae_path = None
        # first we use whatever is in the config file
        if vae:
            path = Path(vae if os.path.isabs(vae) else os.path.normpath(os.path.join(Globals.root, vae)))
            if path.exists():
                vae_path = path
        # then we look for a file with the same basename
        vae_path = vae_path or self._scan_for_matching_file(Path(weights))

        # if converting automatically to diffusers, then we do the conversion and return
        # a diffusers pipeline
        if Globals.ckpt_convert:
        if convert:
            print(
                f">> Converting legacy checkpoint {model_name} into a diffusers model..."
            )
            from ldm.invoke.ckpt_to_diffuser import (
                load_pipeline_from_original_stable_diffusion_ckpt,
            )
            from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt

            self.offload_model(self.current_model)
            if vae_config := self._choose_diffusers_vae(model_name):
                vae = self._load_vae(vae_config)
            try:
                if self.list_models()[self.current_model]['status'] == 'active':
                    self.offload_model(self.current_model)
            except Exception:
                pass

            if self._has_cuda():
                torch.cuda.empty_cache()
            pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
                checkpoint_path=weights,
                original_config_file=config,
                vae=vae,
                vae_path=vae_path,
                return_generator_pipeline=True,
                precision=torch.float16
                if self.precision == "float16"
@@ -420,11 +419,6 @@ class ModelManager(object):
                "NOHASH",
            )

        # scan model
        self.scan_model(model_name, weights)

        print(f">> Loading {model_name} from {weights}")

        # for usage statistics
        if self._has_cuda():
            torch.cuda.reset_peak_memory_stats()
@@ -438,10 +432,13 @@ class ModelManager(object):
            weight_bytes = f.read()
        model_hash = self._cached_sha256(weights, weight_bytes)
        sd = None
        if weights.endswith(".safetensors"):
            sd = safetensors.torch.load(weight_bytes)
        else:

        if weights.endswith(".ckpt"):
            self.scan_model(model_name, weights)
            sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
        else:
            sd = safetensors.torch.load(weight_bytes)

        del weight_bytes
        # merged models from auto11 merge board are flat for some reason
        if "state_dict" in sd:
@@ -459,26 +456,17 @@ class ModelManager(object):
            print(" | Using more accurate float32 precision")

        # look and load a matching vae file. Code borrowed from AUTOMATIC1111 modules/sd_models.py
        if vae:
            if not os.path.isabs(vae):
                vae = os.path.normpath(os.path.join(Globals.root, vae))
            if os.path.exists(vae):
                print(f" | Loading VAE weights from: {vae}")
                vae_ckpt = None
                vae_dict = None
                if vae.endswith(".safetensors"):
                    vae_ckpt = safetensors.torch.load_file(vae)
                    vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
                else:
                    vae_ckpt = torch.load(vae, map_location="cpu")
                    vae_dict = {
                        k: v
                        for k, v in vae_ckpt["state_dict"].items()
                        if k[0:4] != "loss"
                    }
                model.first_stage_model.load_state_dict(vae_dict, strict=False)
        if vae_path:
            print(f" | Loading VAE weights from: {vae_path}")
            if vae_path.suffix in [".ckpt", ".pt"]:
                self.scan_model(vae_path.name, vae_path)
                vae_ckpt = torch.load(vae_path, map_location="cpu")
            else:
                print(f" | VAE file {vae} not found. Skipping.")
                vae_ckpt = safetensors.torch.load_file(vae_path)
            vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
            model.first_stage_model.load_state_dict(vae_dict, strict=False)
        else:
            print(" | Using VAE built into model.")

        model.to(self.device)
        # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
@@ -497,9 +485,9 @@ class ModelManager(object):

        print(f">> Loading diffusers model from {name_or_path}")
        if using_fp16:
            print(" | Using faster float16 precision")
            print("   | Using faster float16 precision")
        else:
            print(" | Using more accurate float32 precision")
            print("   | Using more accurate float32 precision")

        # TODO: scan weights maybe?
        pipeline_args: dict[str, Any] = dict(
@@ -551,10 +539,21 @@ class ModelManager(object):
            width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
            height = width

        print(f" | Default image dimensions = {width} x {height}")
        print(f"   | Default image dimensions = {width} x {height}")

        return pipeline, width, height, model_hash

    def is_v2_config(self, config: Path) -> bool:
        if not os.path.isabs(config):
            config = os.path.join(Globals.root, config)
        try:
            mconfig = OmegaConf.load(config)
            return (
                mconfig["model"]["params"]["unet_config"]["params"]["context_dim"] > 768
            )
        except:
            return False

    def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path:
        if isinstance(model_name, DictConfig) or isinstance(model_name, dict):
            mconfig = model_name
@@ -591,13 +590,14 @@ class ModelManager(object):
        if self._has_cuda():
            torch.cuda.empty_cache()

    @classmethod
    def scan_model(self, model_name, checkpoint):
        """
        Apply picklescanner to the indicated checkpoint and issue a warning
        and option to exit if an infected file is identified.
        """
        # scan model
        print(f">> Scanning Model: {model_name}")
        print(f" | Scanning Model: {model_name}")
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
            if scan_result.infected_files == 1:
@@ -620,7 +620,7 @@ class ModelManager(object):
                print("### Exiting InvokeAI")
                sys.exit()
        else:
            print(">> Model scanned ok")
            print(" | Model scanned ok")

    def import_diffuser_model(
        self,
@@ -731,7 +731,7 @@ class ModelManager(object):
        SDLegacyType.V2_v (V2 using 'v_prediction' prediction type)
        SDLegacyType.UNKNOWN
        """
        global_step = checkpoint.get('global_step')
        global_step = checkpoint.get("global_step")
        state_dict = checkpoint.get("state_dict") or checkpoint

        try:
@@ -758,14 +758,14 @@ class ModelManager(object):
        return SDLegacyType.UNKNOWN

    def heuristic_import(
            self,
            path_url_or_repo: str,
            convert: bool = False,
            model_name: str = None,
            description: str = None,
            model_config_file: Path = None,
            commit_to_conf: Path = None,
            config_file_callback: Callable[[Path],Path] = None,
        self,
        path_url_or_repo: str,
        convert: bool = False,
        model_name: str = None,
        description: str = None,
        model_config_file: Path = None,
        commit_to_conf: Path = None,
        config_file_callback: Callable[[Path], Path] = None,
    ) -> str:
        """
        Accept a string which could be:
@@ -800,26 +800,26 @@ class ModelManager(object):
        print(f">> Probing {thing} for import")

        if thing.startswith(("http:", "https:", "ftp:")):
            print(f" | {thing} appears to be a URL")
            print(f"   | {thing} appears to be a URL")
            model_path = self._resolve_path(
                thing, "models/ldm/stable-diffusion-v1"
            )  # _resolve_path does a download if needed
            is_temporary = True

        elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
            if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
                print(
                    f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
                    f"   | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
                )
                return
            else:
                print(f" | {thing} appears to be a checkpoint file on disk")
                print(f"   | {thing} appears to be a checkpoint file on disk")
                model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")

        elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
            print(f" | {thing} appears to be a diffusers file on disk")
            model_name = self.import_diffuser_model(
                thing,
                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
                model_name=model_name,
                description=description,
                commit_to_conf=commit_to_conf,
@@ -839,10 +839,10 @@ class ModelManager(object):
                Path(thing).rglob("*.safetensors")
            ):
                if model_name := self.heuristic_import(
                        str(m),
                        convert,
                        commit_to_conf=commit_to_conf,
                        config_file_callback=config_file_callback,
                    str(m),
                    convert,
                    commit_to_conf=commit_to_conf,
                    config_file_callback=config_file_callback,
                ):
                    print(f" >> {model_name} successfully imported")
            return model_name

@@ -869,70 +869,78 @@ class ModelManager(object):
            return model_path.stem

        # another round of heuristics to guess the correct config file.
        checkpoint = (
            safetensors.torch.load_file(model_path)
            if model_path.suffix == ".safetensors"
            else torch.load(model_path)
        )
        checkpoint = None
        if model_path.suffix.endswith((".ckpt", ".pt")):
            self.scan_model(model_path, model_path)
            checkpoint = torch.load(model_path)
        else:
            checkpoint = safetensors.torch.load_file(model_path)
        # additional probing needed if no config file provided
        if model_config_file is None:
            model_type = self.probe_model_type(checkpoint)
            if model_type == SDLegacyType.V1:
                print(" | SD-v1 model detected")
                model_config_file = Path(
                    Globals.root, "configs/stable-diffusion/v1-inference.yaml"
                )
            elif model_type == SDLegacyType.V1_INPAINT:
                print(" | SD-v1 inpainting model detected")
                model_config_file = Path(
                    Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
                )
            elif model_type == SDLegacyType.V2_v:
                print(
                    " | SD-v2-v model detected"
                )
                model_config_file = Path(
                    Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
                )
            elif model_type == SDLegacyType.V2_e:
                print(
                    " | SD-v2-e model detected"
                )
                model_config_file = Path(
                    Globals.root, "configs/stable-diffusion/v2-inference.yaml"
                )
            elif model_type == SDLegacyType.V2:
                print(
                    f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
                )
            # Is there a like-named .yaml file in the same directory as the
            # weights file? If so, we treat this as our model
            if model_path.with_suffix(".yaml").exists():
                model_config_file = model_path.with_suffix(".yaml")
                print(f"   | Using config file {model_config_file.name}")
            else:
                print(
                    f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
                )
                model_type = self.probe_model_type(checkpoint)
                if model_type == SDLegacyType.V1:
                    print("   | SD-v1 model detected")
                    model_config_file = Path(
                        Globals.root, "configs/stable-diffusion/v1-inference.yaml"
                    )
                elif model_type == SDLegacyType.V1_INPAINT:
                    print("   | SD-v1 inpainting model detected")
                    model_config_file = Path(
                        Globals.root,
                        "configs/stable-diffusion/v1-inpainting-inference.yaml",
                    )
                elif model_type == SDLegacyType.V2_v:
                    print("   | SD-v2-v model detected")
                    model_config_file = Path(
                        Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
                    )
                elif model_type == SDLegacyType.V2_e:
                    print("   | SD-v2-e model detected")
                    model_config_file = Path(
                        Globals.root, "configs/stable-diffusion/v2-inference.yaml"
                    )
                elif model_type == SDLegacyType.V2:
                    print(
                        f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide the configuration file type or path."
                    )
                else:
                    print(
                        f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide the configuration file type or path."
                    )

        if not model_config_file and config_file_callback:
            model_config_file = config_file_callback(model_path)
            if not model_config_file:
                return

        if model_config_file.name.startswith('v2'):
        if self.is_v2_config(model_config_file):
            convert = True
            print(
                " | This SD-v2 model will be converted to diffusers format for use"
            )
            print("   | This SD-v2 model will be converted to diffusers format for use")

        if (vae_path := self._scan_for_matching_file(model_path)):
            print(f"   | Using VAE file {vae_path.name}")

        if convert:
            diffuser_path = Path(
                Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
            )
            vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
            model_name = self.convert_and_import(
                model_path,
                diffusers_path=diffuser_path,
                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
                vae=vae,
                vae_path=vae_path,
                model_name=model_name,
                model_description=description,
                original_config_file=model_config_file,
                commit_to_conf=commit_to_conf,
                scan_needed=False,
            )
            # in the event that this file was downloaded automatically prior to conversion
            # we do not keep the original .ckpt/.safetensors around
@@ -945,7 +953,8 @@ class ModelManager(object):
                model_name=model_name,
                model_description=description,
                vae=str(
                    Path(
                        vae_path
                        or Path(
                        Globals.root,
                        "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
                    )
@@ -962,9 +971,11 @@ class ModelManager(object):
        diffusers_path: Path,
        model_name=None,
        model_description=None,
        vae=None,
        vae: dict = None,
        vae_path: Path = None,
        original_config_file: Path = None,
        commit_to_conf: Path = None,
        scan_needed: bool = True,
    ) -> str:
        """
        Convert a legacy ckpt weights file to diffuser model and import
@@ -978,7 +989,7 @@ class ModelManager(object):

        new_config = None

        from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
        from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffusers

        if diffusers_path.exists():
            print(
@@ -992,18 +1003,23 @@ class ModelManager(object):
        try:
            # By passing the specified VAE to the conversion function, the autoencoder
            # will be built into the model rather than tacked on afterward via the config file
            vae_model = self._load_vae(vae) if vae else None
            convert_ckpt_to_diffuser(
            vae_model = None
            if vae:
                vae_model = self._load_vae(vae)
                vae_path = None
            convert_ckpt_to_diffusers(
                ckpt_path,
                diffusers_path,
                extract_ema=True,
                original_config_file=original_config_file,
                vae=vae_model,
                vae_path=vae_path,
                scan_needed=scan_needed,
            )
            print(
                f" | Success. Optimized model is now located at {str(diffusers_path)}"
                f"   | Success. Optimized model is now located at {str(diffusers_path)}"
            )
            print(f" | Writing new config file entry for {model_name}")
            print(f"   | Writing new config file entry for {model_name}")
            new_config = dict(
                path=str(diffusers_path),
                description=model_description,
@@ -1044,36 +1060,6 @@ class ModelManager(object):

        return search_folder, found_models

    def _choose_diffusers_vae(
        self, model_name: str, vae: str = None
    ) -> Union[dict, str]:
        # In the event that the original entry is using a custom ckpt VAE, we try to
        # map that VAE onto a diffuser VAE using a hard-coded dictionary.
        # I would prefer to do this differently: We load the ckpt model into memory, swap the
        # VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped
        # VAE is built into the model. However, when I tried this I got obscure key errors.
        if vae:
            return vae
        if model_name in self.config and (
            vae_ckpt_path := self.model_info(model_name).get("vae", None)
        ):
            vae_basename = Path(vae_ckpt_path).stem
            diffusers_vae = None
            if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None):
                print(
                    f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version"
                )
                vae = {"repo_id": diffusers_vae}
            else:
                print(
                    f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown'
                )
                print(
                    '** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config'
                )
                vae = {"repo_id": "stabilityai/sd-vae-ft-mse"}
        return vae

    def _make_cache_room(self) -> None:
        num_loaded_models = len(self.models)
        if num_loaded_models >= self.max_loaded_models:
@@ -1136,14 +1122,14 @@ class ModelManager(object):
        legacy_locations = [
            Path(
                models_dir,
                "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker"
                "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
            ),
            Path("bert-base-uncased/models--bert-base-uncased"),
            Path(
                "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
            ),
        ]
        legacy_locations.extend(list(global_cache_dir("diffusers").glob('*')))
        legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
        legacy_layout = False
        for model in legacy_locations:
            legacy_layout = legacy_layout or model.exists()
@@ -1187,7 +1173,7 @@ class ModelManager(object):
                source.unlink()
            else:
                move(source, dest)

        # now clean up by removing any empty directories
        empty = [
            root
@@ -1293,7 +1279,7 @@ class ModelManager(object):
            with open(hashpath) as f:
                hash = f.read()
            return hash
        print(" | Calculating sha256 hash of model files")
        print("   | Calculating sha256 hash of model files")
        tic = time.time()
        sha = hashlib.sha256()
        count = 0
@@ -1305,7 +1291,7 @@ class ModelManager(object):
                sha.update(chunk)
        hash = sha.hexdigest()
        toc = time.time()
        print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
        print(f"   | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
        with open(hashpath, "w") as f:
            f.write(hash)
        return hash
@@ -1335,6 +1321,22 @@ class ModelManager(object):
            f.write(hash)
        return hash

    @classmethod
    def _scan_for_matching_file(
        self, model_path: Path,
        suffixes: List[str] = ['.vae.pt', '.vae.ckpt', '.vae.safetensors']
    ) -> Path:
        """
        Find a file with same basename as the indicated model, but with one
        of the suffixes passed.
        """
        # look for a custom vae
        vae_path = None
        for suffix in suffixes:
            if model_path.with_suffix(suffix).exists():
                vae_path = model_path.with_suffix(suffix)
        return vae_path

    def _load_vae(self, vae_config) -> AutoencoderKL:
        vae_args = {}
        try:
@@ -1346,16 +1348,16 @@ class ModelManager(object):
        using_fp16 = self.precision == "float16"

        vae_args.update(
            cache_dir=global_cache_dir("hug"),
            cache_dir=global_cache_dir("hub"),
            local_files_only=not Globals.internet_available,
        )

        print(f" | Loading diffusers VAE from {name_or_path}")
        print(f"   | Loading diffusers VAE from {name_or_path}")
        if using_fp16:
            vae_args.update(torch_dtype=torch.float16)
            fp_args_list = [{"revision": "fp16"}, {}]
        else:
            print(" | Using more accurate float32 precision")
            print("   | Using more accurate float32 precision")
            fp_args_list = [{}]

        vae = None
@@ -1396,7 +1398,7 @@ class ModelManager(object):
            hashes_to_delete.add(revision.commit_hash)
        strategy = cache_info.delete_revisions(*hashes_to_delete)
        print(
            f"** deletion of this model is expected to free {strategy.expected_freed_size_str}"
            f"** Deletion of this model is expected to free {strategy.expected_freed_size_str}"
        )
        strategy.execute()

@@ -30,14 +30,17 @@ class PngWriter:
            prefix = self._unused_prefix()
        else:
            with open(next_prefix_file,'r') as file:
                prefix = int(file.readline() or int(self._unused_prefix())-1)
                prefix += 1
                prefix = 0
                try:
                    prefix = int(file.readline())
                except (TypeError, ValueError):
                    prefix = self._unused_prefix()
        with open(next_prefix_file,'w') as file:
            file.write(str(prefix))
            file.write(str(prefix+1))
        return f'{prefix:06}'

    # gives the next unique prefix in outdir
    def _unused_prefix(self):
    def _unused_prefix(self) -> int:
        # sort reverse alphabetically until we find max+1
        dirlist = sorted(os.listdir(self.outdir), reverse=True)
        # find the first filename that matches our pattern or return 000000.0.png
@@ -45,8 +48,7 @@ class PngWriter:
            (f for f in dirlist if re.match('^(\d+)\..*\.png', f)),
            '0000000.0.png',
        )
        basecount = int(existing_name.split('.', 1)[0]) + 1
        return f'{basecount:06}'
        return int(existing_name.split('.', 1)[0]) + 1

    # saves image named _image_ to outdir/name, writing metadata from prompt
    # returns full path of output

@@ -17,6 +17,7 @@ from pathlib import Path
from typing import List, Tuple

import npyscreen
from diffusers.utils.import_utils import is_xformers_available
from npyscreen import widget
from omegaconf import OmegaConf

@@ -29,7 +30,7 @@ from ldm.invoke.training.textual_inversion_training import (
TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
CONF_FILE = "preferences.conf"

XFORMERS_AVAILABLE = is_xformers_available()

class textualInversionForm(npyscreen.FormMultiPageAction):
    resolutions = [512, 768, 1024]
@@ -178,7 +179,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
            out_of=10000,
            step=500,
            lowest=1,
            value=saved_args.get("max_train_steps", 3000),
            value=saved_args.get("max_train_steps", 2500),
            scroll_exit=True,
        )
        self.train_batch_size = self.add_widget_intelligent(
@@ -187,7 +188,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
            out_of=50,
            step=1,
            lowest=1,
            value=saved_args.get("train_batch_size", 8),
            value=saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3),
            scroll_exit=True,
        )
        self.gradient_accumulation_steps = self.add_widget_intelligent(
@@ -225,7 +226,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
        self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Use xformers acceleration",
            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
            value=saved_args.get("enable_xformers_memory_efficient_attention", XFORMERS_AVAILABLE),
            scroll_exit=True,
        )
        self.lr_scheduler = self.add_widget_intelligent(
@@ -428,8 +429,7 @@ def do_front_end(args: Namespace):
        print(str(e))
        print("** DETAILS:")
        print(traceback.format_exc())


def main():
    args = parse_args()
    global_set_root(args.root_dir or Globals.root)

@@ -67,7 +67,7 @@ else:
     "nearest": PIL.Image.NEAREST,
 }
 # ------------------------------------------------------------------------------
+XFORMERS_AVAILABLE = is_xformers_available()

 # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
 check_min_version("0.10.0.dev0")
@@ -227,7 +227,7 @@ def parse_args():
     training_group.add_argument(
         "--train_batch_size",
         type=int,
-        default=16,
+        default=8 if XFORMERS_AVAILABLE else 3,
         help="Batch size (per device) for the training dataloader.",
     )
     training_group.add_argument("--num_train_epochs", type=int, default=100)
@@ -324,6 +324,7 @@ def parse_args():
     parser.add_argument(
         "--enable_xformers_memory_efficient_attention",
         action="store_true",
+        default=XFORMERS_AVAILABLE,
         help="Whether or not to use xformers.",
     )

@@ -536,7 +537,7 @@ def do_textual_inversion_training(
     seed: int = None,
     resolution: int = 512,
     center_crop: bool = False,
-    train_batch_size: int = 16,
+    train_batch_size: int = 4,
     num_train_epochs: int = 100,
     max_train_steps: int = 5000,
     gradient_accumulation_steps: int = 1,
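One caveat worth noting about the `--enable_xformers_memory_efficient_attention` change: with `action="store_true"` and a truthy default, there is no command-line way to turn the feature back off. A hypothetical alternative (not part of the patch) is `argparse.BooleanOptionalAction` from Python 3.9+, which generates a matching `--no-...` flag:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--enable_xformers_memory_efficient_attention",
    action=argparse.BooleanOptionalAction,  # also accepts --no-enable_xformers_...
    default=True,  # stand-in for XFORMERS_AVAILABLE
    help="Whether or not to use xformers.",
)
print(parser.parse_args([]))
# Namespace(enable_xformers_memory_efficient_attention=True)
print(parser.parse_args(["--no-enable_xformers_memory_efficient_attention"]))
# Namespace(enable_xformers_memory_efficient_attention=False)
```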
@@ -19,7 +19,7 @@ from functools import partial
 from tqdm import tqdm
 from torchvision.utils import make_grid
 from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig
+from omegaconf import ListConfig, OmegaConf
 import urllib

 from ldm.modules.textual_inversion_manager import TextualInversionManager
@@ -609,6 +609,7 @@ class DDPM(pl.LightningModule):
         opt = torch.optim.AdamW(params, lr=lr)
         return opt


+
 class LatentDiffusion(DDPM):
     """main class"""
@@ -617,7 +618,7 @@
         self,
         first_stage_config,
         cond_stage_config,
-        personalization_config,
+        personalization_config=None,
         num_timesteps_cond=None,
         cond_stage_key='image',
         cond_stage_trainable=False,
@@ -675,7 +676,8 @@
         self.model.train = disabled_train
         for param in self.model.parameters():
             param.requires_grad = False

+        personalization_config = personalization_config or self._fallback_personalization_config()
         self.embedding_manager = self.instantiate_embedding_manager(
             personalization_config, self.cond_stage_model
         )
@@ -2150,6 +2152,25 @@

         self.emb_ckpt_counter += 500

+    @classmethod
+    def _fallback_personalization_config(cls) -> dict:
+        """
+        This protects us against custom legacy config files that
+        don't contain the personalization_config section.
+        """
+        return OmegaConf.create(
+            dict(
+                target='ldm.modules.embedding_manager.EmbeddingManager',
+                params=dict(
+                    placeholder_strings=['*'],
+                    initializer_words=['sculpture'],
+                    per_image_tokens=False,
+                    num_vectors_per_token=1,
+                    progressive_words=False,
+                ),
+            )
+        )

 class DiffusionWrapper(pl.LightningModule):
     def __init__(self, diff_model_config, conditioning_key):
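For context, the fallback config follows ldm's `target`/`params` convention: a config node names the class to import and the keyword arguments to build it with. The project resolves such nodes with its `instantiate_from_config` helper; the sketch below is a simplified stand-in for that resolver, using a harmless demo target rather than `EmbeddingManager`:

```python
import importlib
from omegaconf import OmegaConf

def instantiate_from_config(config):
    # Split "package.module.ClassName" into an import plus an attribute lookup,
    # then call the class with the params mapping as keyword arguments.
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**config.get("params", {}))

demo = OmegaConf.create(
    dict(target="collections.Counter", params=dict())  # demo target only
)
print(instantiate_from_config(demo))  # Counter()
```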
@@ -463,6 +463,9 @@ class FrozenCLIPEmbedder(AbstractEncoder):
     def encode(self, text, **kwargs):
         return self(text, **kwargs)

+    def set_textual_inversion_manager(self, manager):  # manager: TextualInversionManager
+        self.textual_inversion_manager = manager
+
     @property
     def device(self):
         return self.transformer.device
@@ -476,10 +479,6 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder):
     fragment_weights_key = "fragment_weights"
     return_tokens_key = "return_tokens"

-    def set_textual_inversion_manager(self, manager):  # manager: TextualInversionManager
-        # TODO all of the weighting and expanding stuff needs to be moved out of this class
-        self.textual_inversion_manager = manager
-
     def forward(self, text: list, **kwargs):
         # TODO all of the weighting and expanding stuff needs to be moved out of this class
         '''
@@ -1,9 +1,9 @@
 import os
 import traceback
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Optional, Union

+import safetensors.torch
 import torch
 from picklescan.scanner import scan_file_path
 from transformers import CLIPTextModel, CLIPTokenizer
@@ -71,21 +71,6 @@ class TextualInversionManager(BaseTextualInversionManager):

         if str(ckpt_path).endswith(".DS_Store"):
             return

-        try:
-            scan_result = scan_file_path(str(ckpt_path))
-            if scan_result.infected_files == 1:
-                print(
-                    f"\n### Security Issues Found in Model: {scan_result.issues_count}"
-                )
-                print("### For your safety, InvokeAI will not load this embed.")
-                return
-        except Exception:
-            print(
-                f"### {ckpt_path.parents[0].name}/{ckpt_path.name} is damaged or corrupt."
-            )
-            return
-
         embedding_info = self._parse_embedding(str(ckpt_path))

         if embedding_info is None:
@@ -96,7 +81,7 @@ class TextualInversionManager(BaseTextualInversionManager):
                 != embedding_info["token_dim"]
             ):
                 print(
-                    f"** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info['token_dim']}."
+                    f" ** Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info['token_dim']}."
                 )
                 return
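This hunk deletes the load-time malware check; the following hunks move it into `_parse_embedding` itself, so every pickle-based format is scanned immediately before `torch.load`, while safetensors files skip the scan. A minimal sketch of that scan-then-load pattern, assuming picklescan's `scan_file_path` and `safetensors` exactly as the patch uses them:

```python
from pathlib import Path

import safetensors.torch
import torch
from picklescan.scanner import scan_file_path

def safe_load_checkpoint(path: str):
    """Scan pickle-based files before unpickling; safetensors needs no scan."""
    if Path(path).suffix in {".pt", ".ckpt", ".bin"}:
        result = scan_file_path(path)
        if result.infected_files:  # refuse to unpickle anything suspicious
            raise RuntimeError(f"security issues found in {path}")
        return torch.load(path, map_location="cpu")
    # safetensors is a plain tensor container and cannot execute code on load
    return safetensors.torch.load_file(path)
```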
@@ -309,92 +294,72 @@ class TextualInversionManager(BaseTextualInversionManager):

         return token_id

-    def _parse_embedding(self, embedding_file: str):
-        file_type = embedding_file.split(".")[-1]
-        if file_type == "pt":
-            return self._parse_embedding_pt(embedding_file)
-        elif file_type == "bin":
-            return self._parse_embedding_bin(embedding_file)
-        else:
-            print(f"** Notice: unrecognized embedding file format: {embedding_file}")
+    def _parse_embedding(self, embedding_file: str) -> dict:
+        suffix = Path(embedding_file).suffix
+        try:
+            if suffix in [".pt", ".ckpt", ".bin"]:
+                scan_result = scan_file_path(embedding_file)
+                if scan_result.infected_files == 1:
+                    print(
+                        f" ** Security Issues Found in Model: {scan_result.issues_count}"
+                    )
+                    print(" ** For your safety, InvokeAI will not load this embed.")
+                    return
+                ckpt = torch.load(embedding_file, map_location="cpu")
+            else:
+                ckpt = safetensors.torch.load_file(embedding_file)
+        except Exception as e:
+            print(f" ** Notice: unrecognized embedding file format: {embedding_file}: {e}")
             return None

-    def _parse_embedding_pt(self, embedding_file):
-        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
-        embedding_info = {}
-
-        # Check if valid embedding file
-        if "string_to_token" and "string_to_param" in embedding_ckpt:
-            # Catch variants that do not have the expected keys or values.
-            try:
-                embedding_info["name"] = embedding_ckpt["name"] or os.path.basename(
-                    os.path.splitext(embedding_file)[0]
-                )
-
-                # Check num of embeddings and warn user only the first will be used
-                embedding_info["num_of_embeddings"] = len(
-                    embedding_ckpt["string_to_token"]
-                )
-                if embedding_info["num_of_embeddings"] > 1:
-                    print(">> More than 1 embedding found. Will use the first one")
-
-                embedding = list(embedding_ckpt["string_to_param"].values())[0]
-            except (AttributeError, KeyError):
-                return self._handle_broken_pt_variants(embedding_ckpt, embedding_file)
-
-            embedding_info["embedding"] = embedding
-            embedding_info["num_vectors_per_token"] = embedding.size()[0]
-            embedding_info["token_dim"] = embedding.size()[1]
-
-            try:
-                embedding_info["trained_steps"] = embedding_ckpt["step"]
-                embedding_info["trained_model_name"] = embedding_ckpt[
-                    "sd_checkpoint_name"
-                ]
-                embedding_info["trained_model_checksum"] = embedding_ckpt[
-                    "sd_checkpoint"
-                ]
-            except AttributeError:
-                print(">> No Training Details Found. Passing ...")
-
-        # .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/
-        # They are actually .bin files
-        elif len(embedding_ckpt.keys()) == 1:
-            embedding_info = self._parse_embedding_bin(embedding_file)
-
+        # try to figure out what kind of embedding file it is and parse accordingly
+        keys = list(ckpt.keys())
+        if all(x in keys for x in ['string_to_token', 'string_to_param', 'name', 'step']):
+            return self._parse_embedding_v1(ckpt, embedding_file)  # example: rem_rezero.pt
+
+        elif all(x in keys for x in ['string_to_token', 'string_to_param']):
+            return self._parse_embedding_v2(ckpt, embedding_file)  # example: midj-strong.pt
+
+        elif 'emb_params' in keys:
+            return self._parse_embedding_v3(ckpt, embedding_file)  # example: easynegative.safetensors
+
         else:
-            print(">> Invalid embedding format")
-            embedding_info = None
+            return self._parse_embedding_v4(ckpt, embedding_file)  # usually a '.bin' file
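The rewritten `_parse_embedding` dispatches on the key fingerprint of the loaded dict rather than on the file extension, since the extension is unreliable (some `.pt` files in the wild are really `.bin` files). A condensed sketch of that classification, with key sets and example files taken from the hunk above:

```python
def classify_embedding(ckpt: dict) -> str:
    keys = set(ckpt.keys())
    if {"string_to_token", "string_to_param", "name", "step"} <= keys:
        return "v1"  # e.g. rem_rezero.pt
    if {"string_to_token", "string_to_param"} <= keys:
        return "v2"  # e.g. midj-strong.pt
    if "emb_params" in keys:
        return "v3"  # e.g. easynegative.safetensors
    return "v4"      # usually a diffusers-trained .bin: {token: tensor}

print(classify_embedding({"emb_params": None}))  # v3
```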
+    def _parse_embedding_v1(self, embedding_ckpt: dict, file_path: str):
+        basename = Path(file_path).stem
+        print(f' | Loading v1 embedding file: {basename}')
+
+        embedding_info = {}
+        embedding_info["name"] = embedding_ckpt["name"]
+
+        # Check num of embeddings and warn user only the first will be used
+        embedding_info["num_of_embeddings"] = len(
+            embedding_ckpt["string_to_token"]
+        )
+        if embedding_info["num_of_embeddings"] > 1:
+            print(" | More than 1 embedding found. Will use the first one")
+        embedding = list(embedding_ckpt["string_to_param"].values())[0]
+        embedding_info["embedding"] = embedding
+        embedding_info["num_vectors_per_token"] = embedding.size()[0]
+        embedding_info["token_dim"] = embedding.size()[1]
+        embedding_info["trained_steps"] = embedding_ckpt["step"]
+        embedding_info["trained_model_name"] = embedding_ckpt[
+            "sd_checkpoint_name"
+        ]
+        embedding_info["trained_model_checksum"] = embedding_ckpt[
+            "sd_checkpoint"
+        ]
+        return embedding_info

-    def _parse_embedding_bin(self, embedding_file):
-        embedding_ckpt = torch.load(embedding_file, map_location="cpu")
-        embedding_info = {}
-
-        if list(embedding_ckpt.keys()) == 0:
-            print(">> Invalid concepts file")
-            embedding_info = None
-        else:
-            for token in list(embedding_ckpt.keys()):
-                embedding_info["name"] = (
-                    token
-                    or f"<{os.path.basename(os.path.splitext(embedding_file)[0])}>"
-                )
-                embedding_info["embedding"] = embedding_ckpt[token]
-                embedding_info[
-                    "num_vectors_per_token"
-                ] = 1  # All Concepts seem to default to 1
-                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]
-
-        return embedding_info

-    def _handle_broken_pt_variants(
-        self, embedding_ckpt: dict, embedding_file: str
+    def _parse_embedding_v2(
+        self, embedding_ckpt: dict, file_path: str
     ) -> dict:
         """
-        This handles the broken .pt file variants. We only know of one at present.
+        This handles embedding .pt file variant #2.
         """
+        basename = Path(file_path).stem
+        print(f' | Loading v2 embedding file: {basename}')
         embedding_info = {}
         if isinstance(
             list(embedding_ckpt["string_to_token"].values())[0], torch.Tensor
@@ -403,7 +368,7 @@ class TextualInversionManager(BaseTextualInversionManager):
             embedding_info["name"] = (
                 token
                 if token != "*"
-                else f"<{os.path.basename(os.path.splitext(embedding_file)[0])}>"
+                else f"<{basename}>"
             )
             embedding_info["embedding"] = embedding_ckpt[
                 "string_to_param"
@@ -413,7 +378,46 @@ class TextualInversionManager(BaseTextualInversionManager):
             ].shape[0]
             embedding_info["token_dim"] = embedding_info["embedding"].size()[1]
         else:
-            print(">> Invalid embedding format")
+            print(f" ** {basename}: Unrecognized embedding format")
             embedding_info = None

         return embedding_info

+    def _parse_embedding_v3(self, embedding_ckpt: dict, file_path: str):
+        """
+        Parse 'version 3' of the .pt textual inversion embedding files.
+        """
+        basename = Path(file_path).stem
+        print(f' | Loading v3 embedding file: {basename}')
+        embedding_info = {}
+        embedding_info["name"] = f'<{basename}>'
+        embedding_info["num_of_embeddings"] = 1
+        embedding = embedding_ckpt['emb_params']
+        embedding_info["embedding"] = embedding
+        embedding_info["num_vectors_per_token"] = embedding.size()[0]
+        embedding_info["token_dim"] = embedding.size()[1]
+        return embedding_info

+    def _parse_embedding_v4(self, embedding_ckpt: dict, filepath: str):
+        """
+        Parse 'version 4' of the textual inversion embedding files. This one
+        is usually associated with .bin files trained by HuggingFace diffusers.
+        """
+        basename = Path(filepath).stem
+        short_path = Path(filepath).parents[0].name + '/' + Path(filepath).name
+
+        print(f' | Loading v4 embedding file: {short_path}')
+        embedding_info = {}
+        if len(embedding_ckpt.keys()) == 0:
+            print(f" ** Invalid embeddings file: {short_path}")
+            embedding_info = None
+        else:
+            for token in list(embedding_ckpt.keys()):
+                embedding_info["name"] = (
+                    token
+                    or f"<{basename}>"
+                )
+                embedding_info["embedding"] = embedding_ckpt[token]
+                embedding_info["num_vectors_per_token"] = 1  # All Concepts seem to default to 1
+                embedding_info["token_dim"] = embedding_info["embedding"].size()[0]
+        return embedding_info
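One quirk to be aware of in `_parse_embedding_v4` as written: the `for token in ...` loop rebinds the `embedding_info` fields on every iteration, so a multi-token `.bin` silently keeps only the last token. A hypothetical variant (not in the patch) that keeps the first token and warns, mirroring the v1 parser's behaviour:

```python
from typing import Optional

def parse_embedding_v4_first_token(embedding_ckpt: dict, basename: str) -> Optional[dict]:
    """Keep the first token of a multi-token .bin instead of the last."""
    tokens = list(embedding_ckpt.keys())
    if not tokens:
        return None
    if len(tokens) > 1:
        print(" | More than 1 embedding found. Will use the first one")
    token = tokens[0]
    embedding = embedding_ckpt[token]
    return {
        "name": token or f"<{basename}>",
        "embedding": embedding,
        "num_vectors_per_token": 1,  # diffusers concept embeddings use 1 vector
        "token_dim": embedding.size()[0],
    }
```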
@@ -329,7 +329,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
     resp = requests.get(url, headers=header, stream=True)  # new request with range

     if exist_size > content_length:
-        print('* corrupt existing file found. re-downloading')
+        print(f'* corrupt existing file found (existing_size={exist_size}, content_length={content_length}). re-downloading')
         os.remove(dest)
         exist_size = 0

@@ -341,15 +341,12 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
     elif resp.status_code == 206 or exist_size > 0:
         print(f"* {dest}: partial file found. Resuming...")
     elif resp.status_code != 200:
-        print(f"** An error occurred during downloading {dest}: {resp.reason}")
+        print(f"** An error occurred while downloading {url}: {resp.reason}")
         return None
     else:
         print(f"* {dest}: Downloading...")

     try:
-        if content_length < 2000:
-            print(f"*** ERROR DOWNLOADING {url}: {resp.text}")
-            return None
-
         with open(dest, open_mode) as file, tqdm(
             desc=str(dest),
             initial=exist_size,
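For readers unfamiliar with the resume mechanics these hunks touch: the function requests only the missing byte range and appends to the partial file when the server answers HTTP 206. A minimal sketch of that core loop, assuming `requests` (the project's version adds auth headers, a tqdm progress bar, and the corruption checks shown above):

```python
import os
import requests

def resume_download(url: str, dest: str) -> None:
    exist_size = os.path.getsize(dest) if os.path.exists(dest) else 0
    headers = {"Range": f"bytes={exist_size}-"} if exist_size else {}
    resp = requests.get(url, headers=headers, stream=True)
    # 206 Partial Content means the server honoured the Range header: append.
    mode = "ab" if resp.status_code == 206 else "wb"
    with open(dest, mode) as f:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            f.write(chunk)
```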
@@ -58,6 +58,7 @@ dependencies = [
     "pillow",
     "prompt-toolkit",
     "pudb",
+    "packaging",
     "pypatchmatch",
     "pyreadline3",
     "pytorch-lightning==1.7.7",
@@ -70,7 +71,7 @@ dependencies = [
     "taming-transformers-rom1504",
     "test-tube>=0.7.5",
     "torch-fidelity",
-    "torch>=1.13.1",
+    "torch~=1.13.1",
     "torchmetrics",
     "torchvision>=0.14.1",
     "transformers~=4.26",
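The switch from `torch>=1.13.1` to `torch~=1.13.1` is PEP 440's "compatible release" operator: patch releases are still allowed, but a jump to torch 2.x, which `>=` would have accepted, is refused. A quick check with the `packaging` library (itself added to the dependency list above):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.13.1")  # equivalent to: >=1.13.1, ==1.13.*
print("1.13.2" in spec)  # True  (patch update allowed)
print("2.0.0" in spec)   # False (major version bump rejected)
```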
@@ -121,6 +122,7 @@ requires-python = ">=3.9, <3.11"
 "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
 "invokeai-update" = "ldm.invoke.config.invokeai_update:main"
+"invokeai-batch" = "ldm.invoke.dynamic_prompts:main"
 "invokeai-metadata" = "ldm.invoke.invokeai_metadata:main"

 [project.urls]
 "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
@@ -147,7 +149,7 @@ version = {attr = "ldm.invoke.__version__"}

 [tool.setuptools.package-data]
 "invokeai.assets.web" = ["**.png"]
-"invokeai.configs" = ["**.example", "**.txt", "**.yaml"]
+"invokeai.configs" = ["**.example", "**.txt", "**.yaml", "**/*.yaml"]
 "invokeai.frontend.dist" = ["**"]

 [tool.black]