mirror of https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-15 09:18:00 -05:00

Compare commits (57 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0439b51a26 |  |
|  | ef6870c714 |  |
|  | 8cbb50c204 |  |
|  | 12a8d7fc14 |  |
|  | 3d2b497eb0 |  |
|  | 786b8878d6 |  |
|  | 55132f6463 |  |
|  | ed9186b099 |  |
|  | d2026d0509 |  |
|  | 0bc4ed14cd |  |
|  | 06369d07c0 |  |
|  | 4e61069821 |  |
|  | d7ba041007 |  |
|  | 3859302f1c |  |
|  | 865439114b |  |
|  | 4d76116152 |  |
|  | 42f5bd4e12 |  |
|  | 04e77f3858 |  |
|  | 1fc1eeec38 |  |
|  | 556081695a |  |
|  | ad7917c7aa |  |
|  | 39cca8139f |  |
|  | 1d1988683b |  |
|  | 44a0055571 |  |
|  | 0cc01143d8 |  |
|  | 1c0247d58a |  |
|  | d335f51e5f |  |
|  | 38cd968130 |  |
|  | 0111304982 |  |
|  | c607d4fe6c |  |
|  | 6d6076d3c7 |  |
|  | 485fcc7fcb |  |
|  | 76633f500a |  |
|  | ed6194351c |  |
|  | f237744ab1 |  |
|  | 678cf8519e |  |
|  | ee9de75b8d |  |
|  | 50f3847ef8 |  |
|  | 8596e3586c |  |
|  | 5ef1e0714b |  |
|  | be871c3ab3 |  |
|  | dec40d9b04 |  |
|  | fe5c008dd5 |  |
|  | 72def2ae13 |  |
|  | 31cd76a2af |  |
|  | 00c78263ce |  |
|  | 5c31feb3a1 |  |
|  | 26f129cef8 |  |
|  | 292ee06751 |  |
|  | c00d53fcce |  |
|  | a78a8728fe |  |
|  | 6b5d19347a |  |
|  | 26671d8eed |  |
|  | b487fa4391 |  |
|  | 12b98ba4ec |  |
|  | fa25a64d37 |  |
|  | 29540452f2 |  |
```
@@ -1,12 +1,26 @@
*
!backend
!configs
!environments-and-requirements
!frontend
!installer
!binary_installer
!ldm
!main.py
!scripts
!server
!static
!setup.py
!docker-build
!docs
docker-build/Dockerfile

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*

# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml

# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
!environment*
environment.yml

**/__pycache__
```
.github/workflows/build-cloud-img.yml (vendored, new file, 87 lines)

```yaml
name: Build and push cloud image
on:
  workflow_dispatch:
  push:
    branches:
      - main
    tags:
      - v*
  # we will NOT push the image on pull requests, only test buildability.
  pull_request:
    branches:
      - main

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          # requires resolving a patchmatch issue
          # - aarch64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: matrix.arch == 'aarch64'

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # see https://github.com/docker/metadata-action
          # will push the following tags:
          # :edge
          # :main (+ any other branches enabled in the workflow)
          # :<tag>
          # :1.2.3 (for semver tags)
          # :1.2 (for semver tags)
          # :<sha>
          tags: |
            type=edge,branch=main
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          # suffix image tags with architecture
          flavor: |
            latest=auto
            suffix=-${{ matrix.arch }},latest=true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # do not login to container registry on PRs
      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          platforms: Linux/${{ matrix.arch }}
          # do not push the image on PRs
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
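For orientation, here is roughly what the tag and flavor rules above produce. This is a sketch, not output captured from the workflow: the exact tags depend on the triggering ref and commit, and it assumes the image lands under this repository's GHCR namespace (`IMAGE_NAME` resolves to `invoke-ai/InvokeAI`, which GHCR lowercases).

```bash
# Hypothetical pulls after a successful push to main:
docker pull ghcr.io/invoke-ai/invokeai:main-x86_64   # branch tag (type=ref,event=branch)
docker pull ghcr.io/invoke-ai/invokeai:edge-x86_64   # rolling tag for main (type=edge)
# After pushing a release tag such as v2.2.4, semver tags also appear:
docker pull ghcr.io/invoke-ai/invokeai:2.2.4-x86_64
```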
.github/workflows/test-invoke-conda.yml (vendored, 37 changed lines)

```
@@ -4,7 +4,6 @@ on:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
```

```
@@ -20,16 +19,28 @@ jobs:
          - environment-lin-amd.yml
          - environment-lin-cuda.yml
          - environment-mac.yml
          - environment-win-cuda.yml
        include:
          - environment-yaml: environment-lin-amd.yml
            os: ubuntu-latest
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            default-shell: bash -l {0}
          - environment-yaml: environment-lin-cuda.yml
            os: ubuntu-latest
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            default-shell: bash -l {0}
          - environment-yaml: environment-mac.yml
            os: macos-12
            curl-command: curl
            github-env: $GITHUB_ENV
            default-shell: bash -l {0}
          - environment-yaml: environment-win-cuda.yml
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
            default-shell: pwsh
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
```

```
@@ -72,15 +83,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
```

```
@@ -96,22 +107,20 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: |
          python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat ~/.invokeai
      - name: cat invokeai.init
        id: cat-invokeai
        run: cat ~/.invokeai
        run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: |
          time python scripts/invoke.py \
            --no-patchmatch \
```

```
@@ -123,11 +132,13 @@ jobs:

      - name: export conda env
        id: export-conda-env
        if: matrix.os != 'windows-2022'
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml
          conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml

      - name: Archive results
        if: matrix.os != 'windows-2022'
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
```
.github/workflows/test-invoke-pip.yml (vendored, 70 changed lines)

```
@@ -19,35 +19,50 @@ jobs:
          - requirements-lin-cuda.txt
          - requirements-lin-amd.txt
          - requirements-mac-mps-cpu.txt
          - requirements-win-colab-cuda.txt
        python-version:
          # - '3.9'
          - '3.10'
        include:
          - requirements-file: requirements-lin-cuda.txt
            os: ubuntu-latest
            default-shell: bash -l {0}
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-lin-amd.txt
            os: ubuntu-latest
            default-shell: bash -l {0}
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-mac-mps-cpu.txt
            os: macOS-12
            default-shell: bash -l {0}
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-win-colab-cuda.txt
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
            stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
    name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    env:
      INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: set INVOKEAI_ROOT Windows
        if: matrix.os == 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}

      - name: set INVOKEAI_ROOT others
        if: matrix.os != 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}

      - name: create models.yaml from example
        run: |
          mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
```

```
@@ -55,15 +70,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

      - name: create requirements.txt
        run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
```

```
@@ -72,14 +87,14 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: ${{ matrix.requirements-file }}
          # cache: 'pip'
          # cache-dependency-path: ${{ matrix.requirements-file }}

      # - name: install dependencies
      #   run: ${{ env.pythonLocation }}/bin/pip install --upgrade pip setuptools wheel

      - name: install requirements
        run: ${{ env.pythonLocation }}/bin/pip install -r '${{ matrix.requirements-file }}'
        run: pip3 install -r '${{ matrix.requirements-file }}'

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
```

```
@@ -95,33 +110,20 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: |
          ${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat ~/.invokeai
        id: cat-invokeai
        run: cat ~/.invokeai
        run: python3 scripts/configure_invokeai.py --no-interactive --yes

      - name: Run the tests
        id: run-tests
        run: |
          time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
            --no-patchmatch \
            --no-nsfw_checker \
            --model ${{ matrix.stable-diffusion-model }} \
            --from_file ${{ env.TEST_PROMPTS }} \
            --root="${{ env.INVOKEAI_ROOT }}" \
            --outdir="${{ env.INVOKEAI_ROOT }}/outputs"
        if: matrix.os != 'windows-2022'
        run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"

      - name: Archive results
        id: archive-results
        if: matrix.os != 'windows-2022'
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
```
.gitignore (vendored, 11 changed lines)

```
@@ -222,12 +222,11 @@ environment.yml
requirements.txt

# source installer files
source_installer/*zip
source_installer/invokeAI
install.bat
install.sh
update.bat
update.sh
installer/*zip
installer/install.bat
installer/install.sh
installer/update.bat
installer/update.sh

# this may be present if the user created a venv
invokeai
```
README.md (82 changed lines)

````
@@ -1,11 +1,9 @@
<div align="center">

[image]

# InvokeAI: A Stable Diffusion Toolkit

_Formerly known as lstein/stable-diffusion_

[image]

[![discord badge]][discord link]

[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
````

````
@@ -38,18 +36,33 @@ This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, Mac and Linux machines, with
generation process. It runs on Windows, macOS and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

# Getting Started with InvokeAI

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.2.3)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`:

<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>


_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help aid diagnose issues faster._

## Table of Contents
````

````
@@ -69,10 +82,13 @@ This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)

### Hardware Requirements

InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System

You wil need one of the following:
````

````
@@ -80,6 +96,10 @@ You wil need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.

#### Memory

- At least 12 GB Main Memory RAM.
````

````
@@ -97,11 +117,12 @@ Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag:
you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```
Or by updating your InvokeAI configuration file with this argument.

### Features
````

````
@@ -130,39 +151,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag:

### Latest Changes

- v2.0.1 (13 October 2022)
  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- v2.0.0 (9 October 2022)

  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
    New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.
For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)

### Troubleshooting
````

````
@@ -172,8 +161,9 @@ problems and other issues.
# Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so. To join, just raise your hand on the InvokeAI
Discord server or discussion board.
cleanup, testing, or code reviews, is very much encouraged to do so.

To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.

If you are unfamiliar with how
to contribute to GitHub projects, here is a
````
```
@@ -18,9 +18,11 @@ from PIL.Image import Image as ImageType
from uuid import uuid4
from threading import Event

from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.prompt_parser import split_weighted_subprompts
from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend
from ldm.invoke.generator.inpaint import infill_methods

from backend.modules.parameters import parameters_to_command
```

```
@@ -39,7 +41,7 @@ if not os.path.isabs(args.outdir):


class InvokeAIWebServer:
    def __init__(self, generate, gfpgan, codeformer, esrgan) -> None:
    def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
        self.host = args.host
        self.port = args.port
```

```
@@ -207,9 +209,10 @@ class InvokeAIWebServer:
                FlaskUI(
                    app=self.app,
                    socketio=self.socketio,
                    server="flask_socketio",
                    start_server="flask-socketio",
                    width=1600,
                    height=1000,
                    idle_interval=10,
                    port=self.port
                ).run()
            except KeyboardInterrupt:
```

```
@@ -243,14 +246,16 @@ class InvokeAIWebServer:

    def find_frontend(self):
        my_dir = os.path.dirname(__file__)
        for candidate in (os.path.join(my_dir,'..','frontend','dist'),       # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist') # pip install .
        # LS: setup.py seems to put the frontend in different places on different systems, so
        # this is fragile and needs to be replaced with a better way of finding the front end.
        for candidate in (os.path.join(my_dir,'..','frontend','dist'),        # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist'), # pip install . (Linux, Mac)
                          os.path.join(my_dir,'../../../frontend','dist'),    # pip install . (Windows)
                          ):
            if os.path.exists(candidate):
                return candidate
        assert "Frontend files cannot be found. Cannot continue"

    def setup_app(self):
        self.result_url = "outputs/"
        self.init_image_url = "outputs/init-images/"
```

```
@@ -775,10 +780,10 @@ class InvokeAIWebServer:
            ).convert("RGBA")

            """
            The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
            The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
            to the generator should be:
            {
                "x": 0,
                "x": 0,
                "y": 0,
                "width": original_bounding_box["width"],
                "height": original_bounding_box["height"]
```

```
@@ -798,7 +803,7 @@ class InvokeAIWebServer:
            )

            """
            Apply the mask to the init image, creating a "mask" image with
            Apply the mask to the init image, creating a "mask" image with
            transparency where inpainting should occur. This is the kind of
            mask that prompt2image() needs.
            """
```

```
@@ -904,16 +909,13 @@ class InvokeAIWebServer:
                },
            )


            if generation_parameters["progress_latents"]:
                image = self.generate.sample_to_lowres_estimated_image(sample)
                (width, height) = image.size
                width *= 8
                height *= 8
                buffered = io.BytesIO()
                image.save(buffered, format="PNG")
                img_base64 = "data:image/png;base64," + base64.b64encode(
                    buffered.getvalue()
                ).decode("UTF-8")
                img_base64 = image_to_dataURL(image)
                self.socketio.emit(
                    "intermediateResult",
                    {
```

```
@@ -931,7 +933,7 @@ class InvokeAIWebServer:
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

        def image_done(image, seed, first_seed):
        def image_done(image, seed, first_seed, attention_maps_image=None):
            if self.canceled.is_set():
                raise CanceledException
```

```
@@ -1093,6 +1095,12 @@ class InvokeAIWebServer:
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

            parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
            tokens = None if type(parsed_prompt) is Blend else \
                get_tokens_for_prompt(self.generate.model, parsed_prompt)
            attention_maps_image_base64_url = None if attention_maps_image is None \
                else image_to_dataURL(attention_maps_image)

            self.socketio.emit(
                "generationResult",
                {
```

```
@@ -1105,6 +1113,8 @@ class InvokeAIWebServer:
                    "height": height,
                    "boundingBox": original_bounding_box,
                    "generationMode": generation_parameters["generation_mode"],
                    "attentionMaps": attention_maps_image_base64_url,
                    "tokens": tokens,
                },
            )
            eventlet.sleep(0)
```

```
@@ -1116,7 +1126,7 @@ class InvokeAIWebServer:
            self.generate.prompt2image(
                **generation_parameters,
                step_callback=image_progress,
                image_callback=image_done,
                image_callback=image_done
            )

        except KeyboardInterrupt:
```

```
@@ -1563,6 +1573,19 @@ def dataURL_to_image(dataURL: str) -> ImageType:
    )
    return image

"""
Converts an image into a base64 image dataURL.
"""

def image_to_dataURL(image: ImageType) -> str:
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    image_base64 = "data:image/png;base64," + base64.b64encode(
        buffered.getvalue()
    ).decode("UTF-8")
    return image_base64


"""
Converts a base64 image dataURL into bytes.
```
```
@@ -1,30 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

# make the installer zip for linux and mac
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.sh.in InvokeAI/install.sh
chmod a+x InvokeAI/install.sh
cp readme.txt InvokeAI

zip -r InvokeAI-binary-linux.zip InvokeAI
zip -r InvokeAI-binary-mac.zip InvokeAI

# make the installer zip for windows
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.bat.in InvokeAI/install.bat
cp readme.txt InvokeAI
cp WinLongPathsEnabled.reg InvokeAI

zip -r InvokeAI-binary-windows.zip InvokeAI

rm -rf InvokeAI

echo "The installer zips are ready for distribution."
```
```
@@ -19,7 +19,6 @@ if "%1" == "use-cache" (
)

echo ***** Installing InvokeAI.. *****
echo "USING development BRANCH. REMEMBER TO CHANGE TO main BEFORE RELEASE"
@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
```
```
@@ -213,7 +213,8 @@ _err_exit $? _err_msg

echo -e "\n***** Installed InvokeAI *****\n"

cp binary_installer/invoke.sh .
cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script ******\n"

# more cleanup
```
```
@@ -1,5 +1,6 @@
@echo off

PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the
```

```
@@ -10,10 +11,10 @@ echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py
    .venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web
    .venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
    echo Developer Console
    echo Python command is:
```
```
@@ -20,11 +20,11 @@ read choice
case $choice in
    1)
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py;
        .venv/bin/python scripts/invoke.py $*;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web;
        .venv/bin/python scripts/invoke.py --web $*;
        ;;
    3)
        printf "\nDeveloper Console:\n";
```
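The `%*` and `$*` additions above forward any extra arguments from the launcher to `invoke.py`. A sketch of the effect, using flags that appear elsewhere in this changeset:

```bash
# Whatever follows the launcher name is appended to the chosen invoke.py
# command; picking option 2 here effectively runs:
#   .venv/bin/python scripts/invoke.py --web --host 0.0.0.0
./invoke.sh --host 0.0.0.0
```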
```
@@ -1,6 +1,6 @@
#
# This file is autogenerated by pip-compile with python 3.9
# To update, run:
# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
#
#    pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/py3.10-linux-x86_64-cuda-reqs.txt binary_installer/requirements.in
#
```

```
@@ -418,8 +418,8 @@ getpass-asterisk==1.0.1 \
    --hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
    --hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
    # via -r binary_installer/requirements.in
gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
    --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
gfpgan @ https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system == "Linux" or platform_system == "Darwin" \
    --hash=sha256:4155907b8b7db3686324554df7007eedd245cdf8656c21da9d9a3f44bef2fcaa
    # via
    #   -r binary_installer/requirements.in
    #   realesrgan
```
```
@@ -26,6 +26,7 @@ transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
```
```
@@ -25,3 +25,5 @@ inpainting-1.5:
  config: configs/stable-diffusion/v1-inpainting-inference.yaml
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  description: RunwayML SD 1.5 model optimized for inpainting
  width: 512
  height: 512
```
```
@@ -32,7 +32,7 @@ model:
        placeholder_strings: ["*"]
        initializer_words: ['sculpture']
        per_image_tokens: false
        num_vectors_per_token: 8
        num_vectors_per_token: 1
        progressive_words: False

    unet_config:
```
docker-build/Dockerfile.cloud (new file, 86 lines)

```dockerfile
#######################
#### Builder stage ####

FROM library/ubuntu:22.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt-get install -y \
        git \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev

# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
    ln -sf opencv4.pc opencv.pc

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} &&\
    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
        torch==1.12.0+cu116 \
        torchvision==0.13.0+cu116 &&\
    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch

COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
    cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
    pip install -r requirements.txt &&\
    pip install -e .


#######################
#### Runtime stage ####

FROM library/ubuntu:22.04 as runtime

ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends \
        git \
        curl \
        ncdu \
        iotop \
        bzip2 \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev &&\
    apt-get clean && apt-get autoclean

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig

# build patchmatch
RUN python -c "from patchmatch import patch_match"

## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai

ENTRYPOINT ["bash"]

CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
```
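Assuming a BuildKit-enabled Docker (required for the `--mount=type=cache` instructions above), a manual build from the repository root would look roughly like this; the Makefile below wraps the same command:

```bash
# The Makefile runs from docker-build/ with context '..'; from the repo
# root the equivalent context is '.'.
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f docker-build/Dockerfile.cloud .
```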
docker-build/Makefile (new file, 44 lines)

```makefile
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai

IMAGE=local/invokeai:latest

USER=$(shell id -u)
GROUP=$(shell id -g)

# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.

build:
	DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

configure:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/configure_invokeai.py"

# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-p 9090:9090 \
		${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"

# Run the cli with the runtime dir mounted
cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/invoke.py"

# Run the container with the runtime dir mounted and open a bash shell
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --

.PHONY: build configure web cli shell
```
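A plausible end-to-end session with these targets, assuming the NVIDIA container runtime is installed and the default `~/invokeai` host mount path is acceptable:

```bash
cd docker-build
make build       # build local/invokeai:latest from Dockerfile.cloud
make configure   # first run only: download models into ~/invokeai
make web         # serve the web UI on http://localhost:9090
```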
```
@@ -82,13 +82,18 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
AMD card (using the ROCm driver).

First time users, please see [Automated
Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative
installation and upgrade instructions, please see: [InvokeAI
Installation Overview](installation/)

Linux users who wish to make use of the PyPatchMatch inpainting
functions will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
module. Instructions can be found at [Installing
PyPatchMatch](installation/INSTALL_PATCHMATCH.md).

## :fontawesome-solid-computer: Hardware Requirements
```

```
@@ -100,22 +105,25 @@ You wil need one of the following:
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do **not recommend** the following video cards due to issues with
their running in half-precision mode and having insufficient VRAM to
render 512x512 images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory

- At least 12 GB Main Memory RAM.

### :fontawesome-regular-hard-drive: Disk

- At least 12 GB of free disk space for the machine learning model, Python, and
- At least 18 GB of free disk space for the machine learning model, Python, and
  all its dependencies.

!!! info

    If you are have a Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
    full-precision mode as shown below.

    Similarly, specify full-precision mode on Apple M1 hardware.

    Precision is auto configured based on the device. If however you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:
```
310
docs/installation/INSTALL_AUTOMATED.md
Normal file
310
docs/installation/INSTALL_AUTOMATED.md
Normal file
@@ -0,0 +1,310 @@
|
||||
---
|
||||
title: InvokeAI Automated Installation
|
||||
---
|
||||
|
||||
# InvokeAI Automated Installation
|
||||
|
||||
## Introduction
|
||||
|
||||
The automated installer is a shell script that attempts to automate
|
||||
every step needed to install and run InvokeAI on a stock computer
|
||||
running recent versions of Linux, MacOS or Windows. It will leave you
|
||||
with a version that runs a stable version of InvokeAI with the option
|
||||
to upgrade to experimental versions later.
|
||||
|
||||
## Walk through
|
||||
|
||||
1. Make sure that your system meets the [hardware
|
||||
requirements](../index.md#hardware-requirements) and has the
|
||||
appropriate GPU drivers installed. In particular, if you are a Linux
|
||||
user with an AMD GPU installed, you may need to install the [ROCm
|
||||
driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||
|
||||
- Installation requires roughly 18G of free disk space to load the libraries and
|
||||
recommended model weights files.
|
||||
|
||||
2. Check that your system has an up-to-date Python installed. To do
|
||||
this, open up a command-line window ("Terminal" on Linux and
|
||||
Macintosh, "Command" or "Powershell" on Windows) and type `python
|
||||
--version`. If Python is installed, it will print out the version
|
||||
number. If it is version `3.9.1` or higher, you meet requirements.
|
||||
|
||||
- If you see an older version, or you get a command not found
|
||||
error, then go to [Python
|
||||
Downloads](https://www.python.org/downloads/) and download the
|
||||
appropriate installer package for your platform. We recommend
|
||||
[Version
|
||||
3.10.9](https://www.python.org/downloads/release/python-3109/),
|
||||
which has been extensively tested with InvokeAI.
|
||||
|
||||
-**Windows users**: During the Python configuration process,
|
||||
Please look out for a checkbox to add Python to your PATH
|
||||
and select it. If the install script complains that it can't
|
||||
find python, then open the Python installer again and choose
|
||||
"Modify" existing installation.
|
||||
|
||||
- **Mac users**: After installing Python, you may need to run the
|
||||
following command from the Terminal in order to install the Web
|
||||
certificates needed to download model data from https sites. If
|
||||
you see lots of CERTIFICATE ERRORS during the last part of the
|
||||
install, this is the problem:
|
||||
|
||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
|
||||
Do not use Python 3.11 at this time due to poor performance
|
||||
of the underlying pytorch machine learning library.
|
||||
|
||||
- **Linux users**: See [Installing Python in Ubuntu](#installing-python-in-ubuntu) for some
|
||||
platform-specific tips.
|
||||
|
||||
3. The source installer is distributed in ZIP files. Go to the
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
||||
look for a series of files named:
|
||||
|
||||
- [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
|
||||
- [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
|
||||
- [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)
|
||||
|
||||
Download the one that is appropriate for your operating system.
|
||||
|
||||
4. If you are a macOS user, you may need to install the Xcode command line tools.
|
||||
These are a set of tools that are needed to run certain applications in a Terminal,
|
||||
including InvokeAI. This package is provided directly by Apple.
|
||||
|
||||
- To install, open a terminal window and run `xcode-select
|
||||
--install`. You will get a macOS system popup guiding you through
|
||||
the install. If you already have them installed, you will instead
|
||||
see some output in the Terminal advising you that the tools are
|
||||
already installed.
|
||||
|
||||
- More information can be found here:
|
||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
||||
|
||||
5. If you are a Windows users, there is a slight possibility that you
|
||||
will encountered DLL load errors at the very end of the installation
|
||||
process. This is caused by not having up to date Visual C++
|
||||
redistributable libraries. If this happens to you, you can install
|
||||
the C++ libraries from this site:
|
||||
https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
|
||||
|
||||
6. Unpack the zip file into a convenient directory. This will create
|
||||
a new directory named "InvokeAI-Installer". This example shows how
|
||||
this would look using the `unzip` command-line tool, but you may
|
||||
use any graphical or command-line Zip extractor:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
|
||||
Archive: C: \Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
|
||||
creating: InvokeAI-Installer\
|
||||
inflating: InvokeAI-Installer\install.bat
|
||||
inflating: InvokeAI-Installer\readme.txt
|
||||
...
|
||||
```
|
||||
|
||||
After successful installation, you can delete the
|
||||
`InvokeAI-Installer` directory.
|
||||
|
||||
7. Windows users should now double-click on the file WinLongPathsEnabled.reg
|
||||
and accept the dialog box that asks you if you wish to modify your
|
||||
registry. This activates long filename support on your system and will
|
||||
prevent mysterious errors during installation.
|
||||
|
||||
8. If you are using a desktop GUI, double-click the installer file. It will be
|
||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|
||||
Macintosh systems.
|
||||
|
||||
On Windows systems you will probably get an "Untrusted Publisher" warning.
|
||||
Click on "More Info" and select "Run Anyway." You trust us, right?
|
||||
|
||||
9. Alternatively, from the command line, run the shell script or .bat file:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd InvokeAI-Installer
|
||||
C:\Documents\Linco\invokeAI> install.bat
|
||||
```
|
||||
|
||||
10. The script will ask you to choose where to install InvokeAI. Select
|
||||
a directory with at least 18G of free space for a full
|
||||
install. InvokeAI and all its support files will be installed into
|
||||
a new directory named `invokeai` located at the location you specify.
|
||||
|
||||
- The default is to install the `invokeai` directory in your home
|
||||
directory, usually `C:\Users\YourName\invokeai` on Windows systems,
|
||||
`/home/YourName/invokeai` on Linux systems, and
|
||||
`/Users/YourName/invokeai` on Macintoshes, where "YourName" is your
|
||||
login name.
|
||||
|
||||
- The script uses tab autocompletion to suggest directory path
|
||||
completions. Type part of the path (e.g. "C:\Users") and press
|
||||
<tab> repeatedly to suggest completions.
|
||||
|
||||
11. Sit back and let the install script work. It will install the
|
||||
third-party libraries needed by InvokeAI, then download the
|
||||
current InvokeAI release and install it.
|
||||
|
||||
Be aware that some of the library download and install steps take
|
||||
a long time. In particular, the `pytorch` package is quite large
|
||||
and often appears to get "stuck" at 99.9%. Have patience and the
|
||||
installation step will eventually resume. However, there are
|
||||
occasions when the library install does legitimately get stuck. If
|
||||
you have been waiting for more than ten minutes and nothing is
|
||||
happening, you can interrupt the script with ^C. You may restart
|
||||
it and it will pick up where it left off.
|
||||
|
||||
12. After installation completes, the installer will launch a script
|
||||
called `configure_invokeai.py`, which will guide you through the
|
||||
first-time process of selecting one or more Stable Diffusion model
|
||||
weights files, downloading and configuring them. We provide a list
|
||||
of popular models that InvokeAI performs well with. However, you
|
||||
can add more weight files later on using the command-line client
|
||||
or the Web UI. See [Installing Models](INSTALLING_MODELS.md) for details.
|
||||
|
||||
Note that the main Stable Diffusion weights file is protected by a license
|
||||
agreement that you must agree to in order to use. The script will list the
|
||||
steps you need to take to create an account on the official site that hosts
|
||||
the weights files, accept the agreement, and provide an access token that
|
||||
allows InvokeAI to legally download and install the weights files.
|
||||
|
||||
If you have already downloaded the weights file(s) for another Stable
|
||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||
process for this is described in [Installing Models](INSTALLING_MODELS.md).
|
||||
|
||||
13. The script will now exit and you'll be ready to generate some
|
||||
images. Look for the directory `invokeai` installed in the
|
||||
location you chose at the beginning of the install session. Look
|
||||
for a shell script named `invoke.sh` (Linux/Mac) or `invoke.bat`
|
||||
(Windows). Launch the script by double-clicking it or typing its
|
||||
name at the command-line:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd invokeai
|
||||
C:\Documents\Linco\invokeAI> invoke.bat
|
||||
```
|
||||
|
||||
- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
|
||||
the command-line interface, or (2) the web GUI. If you start the latter, you can
|
||||
load the user interface by pointing your browser at http://localhost:9090.
|
||||
|
||||
- The script also offers you a third option labeled "open the developer
|
||||
console". If you choose this option, you will be dropped into a
|
||||
command-line interface in which you can run python commands directly,
|
||||
access developer tools, and launch InvokeAI with customized options.
|
||||
|
||||
14. You can launch InvokeAI with several different command-line arguments
|
||||
that customize its behavior. For example, you can change the location
|
||||
of the inage output directory, or select your favorite sampler. See
|
||||
the [Command-Line Interface](../features/CLI.md) for a full list of
|
||||
the options.
|
||||
|
||||
- To set defaults that will take effect every time you launch InvokeAI,
|
||||
use a text editor (e.g. Notepad) to exit the file
|
||||
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
||||
follow to add and modify launch options.
|
||||
|
||||
|
||||
!!! warning "The `invokeai` directory contains the `invoke` application, its configuration files, the model weight files, and outputs of image generation. Once InvokeAI is installed, do not move or remove this directory."
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
_Package dependency conflicts_ If you have previously installed
|
||||
InvokeAI or another Stable Diffusion package, the installer may
|
||||
occasionally pick up outdated libraries and either the installer or
|
||||
`invoke` will fail with complaints about library conflicts. You can
|
||||
address this by entering the `invokeai` directory and running
|
||||
`update.sh`, which will bring InvokeAI up to date with the latest
|
||||
libraries.
|
||||
|
||||
!!! warning "Some users have tried to correct dependency problems by installing the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing ldm will make matters worse. If you've installed ldm, uninstall it with `pip uninstall ldm`."
|
||||
|
||||
_"Corrupted configuration file."__ Everything seems to install ok, but
|
||||
`invoke` complains of a corrupted configuration file and goes back
|
||||
into the configuration process (asking you to download models, etc),
|
||||
but this doesn't fix the problem.
|
||||
|
||||
This issue is often caused by a misconfigured configuration directive
|
||||
in the `invokeai\invokeai.init` initialization file that contains
|
||||
startup settings. The easiest way to fix the problem is to move the
|
||||
file out of the way and re-run `configure_invokeai.py`. Enter the
|
||||
developer's console (option 3 of the launcher script) and run this
|
||||
command:
|
||||
|
||||
```cmd
|
||||
configure_invokeai.py --root=.
|
||||
```
|
||||
|
||||
Note the dot (.) after `--root`. It is part of the command.

_If none of these maneuvers fixes the problem_ then please report the
problem to the [InvokeAI
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.

## Updating to newer versions

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated model
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

You may also use the `update` script to install any selected version
of InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to
the zip file link of the version you wish to install. You can find the
zip links by going to one of the release pages and looking for the
**Assets** section at the bottom. Alternatively, you can browse
"branches" and "tags" at the top of the big code directory on the
InvokeAI welcome page. When you find the version you want to install,
go to the green "<> Code" button at the top, and copy the
"Download ZIP" link.

Now run `update.sh` (or `update.bat`) with the URL of the desired
InvokeAI version as its argument. For example, this will install the
old 2.2.0 release:

```cmd
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
```

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.

## Installing Python in Ubuntu

For reasons that are not entirely clear, installing the correct
version of Python can be a bit of a challenge on Ubuntu, Linux Mint, and
other Ubuntu-derived distributions.

In particular, Ubuntu version 20.04 LTS comes with an old version of
Python, does not come with the PIP package manager installed, and to
make matters worse, the `python` command points to Python2, not
Python3.

Here is the quick recipe for bringing your system up to date:

```
sudo apt update
sudo apt install python3.9
sudo apt install python3-pip
cd /usr/bin
sudo ln -sf python3.9 python3
sudo ln -sf python3 python
```

You can still access older versions of Python by calling `python2`,
`python3.8`, etc.
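
Afterwards, confirm that the commands now point at the versions you expect:

```Shell
python --version   # should report Python 3.9.x
pip --version      # should mention (python 3.9)
```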

@@ -6,7 +6,7 @@ title: Docker

!!! warning "For end users"

    We highly recommend installing InvokeAI locally using [these instructions](index.md)

!!! tip "For developers"

@@ -16,6 +16,10 @@ title: Docker

    For general use, install locally to leverage your machine's GPU.

!!! tip "For running on a cloud instance/service"

    Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below

## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI. You'll also

@@ -36,7 +40,7 @@ development purposes it's fine. Once you're done with development tasks on your

laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.

## Installation on a Linux container
## Installation in a Linux container (desktop)

### Prerequisites

@@ -117,12 +121,91 @@ also do so.

./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```

This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.

Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)

---

## Running InvokeAI in the cloud with Docker

We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).

An advantage of this method is that it does not need any local setup or additional dependencies.

See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.

### Prerequisites

- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation

Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essential` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.

### Building and running the image locally

1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
    - `make configure` (This does *not* require a GPU-capable system)
    - this will create a local cache of models and configs (a.k.a. the _runtime dir_)
    - enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!

To use InvokeAI on the CLI, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.

#### Building and running without `make`

(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).

!!! example "Build the image and configure the runtime directory"
    ```Shell
    cd docker-build

    DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

    docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
    ```

!!! example "Run the web server"
    ```Shell
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
    ```

    Access the Web UI at http://localhost:9090

!!! example "Run the InvokeAI interactive CLI"
    ```
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
    ```

### Running the image in the cloud

This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):

1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (e.g. Docker Hub)
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per the above example, using the `docker run ... configure_invokeai.py` command
1. use either one of the `docker run` commands above, substituting the image name for your own image.
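
Steps 2 and 3 might look like the following sketch (`your-registry` is a placeholder for your registry or Docker Hub namespace):

```Shell
docker tag local/invokeai:latest your-registry/invokeai:latest
docker push your-registry/invokeai:latest

# then, on the cloud instance
docker pull your-registry/invokeai:latest
```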

To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.

The template's `README` provides ample detail, but at a high level, the process is as follows:

1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run the `python scripts/configure_invokeai.py` script
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!

Running on other cloud providers such as Vast.ai will likely work in a similar fashion.

---

!!! warning "Deprecated"

    From here on you will find the previous Docker docs, which will still

@@ -135,12 +218,12 @@ also do so.

If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.

If you're **directly on macOS follow these startup instructions**.
With the Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`:
Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.

```Shell

@@ -157,8 +240,8 @@ invoke> q

### Text to Image

For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly.
Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.

```Shell

@@ -172,8 +255,8 @@ You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.

If you're on a container the output is set to the Docker volume. You can copy it
wherever you want.
You can download it from the Docker Desktop app, Volumes, my-vol, data.
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
`*.png` so you'll need to specify the image file name.
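
For instance (a sketch; the container name and file name are placeholders you
would read off `docker ps` and the volume's directory listing):

```Shell
docker cp invokeai-container:/data/000001.1234567890.png .
```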

@@ -2,12 +2,10 @@
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS DOCUMENTATION IS UNFINISHED - VOLUNTEERS GRATEFULLY ACCEPTED

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode

@@ -16,10 +14,19 @@ start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Walkthrough
## Running Online On Google Colaboratory

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)

## Running Locally (Cloning)

1. Install the Jupyter Notebook python library (one-time):

        pip install jupyter

2. Clone the InvokeAI repository:

        git clone https://github.com/invoke-ai/InvokeAI.git
        cd InvokeAI

3. Create a virtual environment using conda:

        conda create -n invoke jupyter

4. Activate the environment and start the Jupyter notebook:

        conda activate invoke
        jupyter notebook

@@ -8,7 +8,7 @@ title: Manual Installation

!!! warning "This is for advanced Users"

    who are already experienced with using conda or pip

## Introduction

@@ -26,6 +26,9 @@ it.

Prior to installing PyPatchMatch, you need to take the following
steps:

### Debian Based Distros

1. Install the `build-essential` tools:

    ```

@@ -44,6 +47,7 @@ steps:

    ```
    cd /usr/lib/x86_64-linux-gnu/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

4. Activate the environment you use for invokeai, either with
   `conda` or with a virtual environment.

@@ -51,7 +55,7 @@ steps:

5. Do a "develop" install of pypatchmatch:

    ```
    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
    pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
    ```

6. Confirm that pypatchmatch is installed.

@@ -79,8 +83,33 @@ steps:

    [link] libpatchmatch.so ...
    ```
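
One way to verify the install from within the activated environment (a sketch;
importing the module triggers compilation of the C++ extension on first use):

```Shell
python -c "from patchmatch import patch_match"
```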

### Arch Based Distros

1. Install the `base-devel` package:

    ```
    sudo pacman -Syu
    sudo pacman -S --needed base-devel
    ```

2. Install `opencv`:

    ```
    sudo pacman -S opencv
    ```

    or for CUDA support

    ```
    sudo pacman -S opencv-cuda
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

**Next, Follow Steps 4-6 from the Debian Section above**

If you see no errors, then you're ready to go!

@@ -10,7 +10,6 @@ The source installer is a shell script that attempts to automate every step

needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md)

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate

@@ -27,12 +26,12 @@ Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/tag/2.2.0-rc4), and
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - invokeAI-src-installer-mac.zip
    - invokeAI-src-installer-windows.zip
    - invokeAI-src-installer-linux.zip
    - [invokeAI-src-installer-2.2.3-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-mac.zip)
    - [invokeAI-src-installer-2.2.3-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-windows.zip)
    - [invokeAI-src-installer-2.2.3-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-linux.zip)

   Download the one that is appropriate for your operating system.
@@ -51,18 +50,30 @@ off the process.

   inflating: invokeAI\readme.txt
   ```

3. If you are a macOS user, you may need to install the Xcode command line tools.
   These are a set of tools that are needed to run certain applications in a Terminal,
   including InvokeAI. This package is provided directly by Apple.

   To install, open a terminal window and run `xcode-select --install`. You will get
   a macOS system popup guiding you through the install. If you already have them
   installed, you will instead see some output in the Terminal advising you that the
   tools are already installed.

   More information can be found here:
   https://www.freecodecamp.org/news/install-xcode-command-line-tools/

4. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

5. Alternatively, from the command line, run the shell script or .bat file:

   ```cmd
   C:\Documents\Linco> cd invokeAI
   C:\Documents\Linco\invokeAI> install.bat
   ```

6. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

@@ -75,7 +86,7 @@ off the process.

   and nothing is happening, you can interrupt the script with ^C. You may restart
   it and it will pick up where it left off.

7. After installation completes, the installer will launch a script called
   `configure_invokeai.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.

@@ -91,7 +102,7 @@ off the process.

   prompted) and configure InvokeAI to use the previously-downloaded files. The
   process for this is described in [Installing Models](INSTALLING_MODELS.md).

8. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:

@@ -5,56 +5,20 @@ title: Overview

We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

1. [InvokeAI binary installer](INSTALL_INVOKE.md)
1. [Automated Installer](INSTALL_AUTOMATED.md)

   This is an installer script that installs InvokeAI and all the
   third party libraries it depends on. It includes access to a
   This is a script that will install all of InvokeAI's essential
   third party libraries and InvokeAI itself. It includes access to a
   "developer console" which will help us debug problems with you and
   give you access to experimental features.

   When a new InvokeAI release is available, you will run an `update`
   script to download and install the new version. Intermediate versions
   that contain experimental and possibly unstable features will not be
   available.

   This installer is designed for people who want the system to "just
   work", don't have an interest in tinkering with it, and do not
   care about upgrading to unreleased experimental features.

   **Important Caveats**

   - This script does not support AMD GPUs. For Linux AMD support,
     please use the manual or source code installer methods.
   - The tab autocomplete feature of the command-line client,
     which completes commonly used filenames and commands, will
     not work in this version. All Web UI functions are fully
     operational, however.

2. [InvokeAI source code installer](INSTALL_SOURCE.md)

   This is a script that will install Python, the Anaconda ("conda")
   package manager, all of InvokeAI's essential third party
   libraries and InvokeAI itself. It includes access to a "developer
   console" which will help us debug problems with you and give you
   access to experimental features.

   When a new InvokeAI feature is available, even between releases,
   you will be able to upgrade and try it out by running an `update`
   script. This method is recommended for individuals who wish to
   stay on the cutting edge of InvokeAI development and are not
   afraid of occasional breakage.

   **Important Caveats**

   - This script is a bit cranky and occasionally hangs or times out,
     forcing you to cancel and restart the script (it will pick up where
     it left off). It also takes noticeably longer to run than the
     binary installer.

2. [Manual Installation](INSTALL_MANUAL.md)

   In this method you will manually run the commands needed to install
   InvokeAI and its dependencies. We offer two recipes: one suited to
   those who prefer the `conda` tool, and one suited to those who prefer
   `pip` and Python virtual environments.
   `pip` and Python virtual environments. In our hands the pip install
   is faster and more reliable, but your mileage may vary.

   This method is recommended for users who have used `conda`
   or `pip` in the past, developers, and anyone who wishes to remain on

@@ -68,9 +32,3 @@ experience and preferences.

   individuals with experience with Docker containers and understand
   the pluses and minuses of a container-based install.

4. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

   This method is suitable for running InvokeAI on a Google Colab
   account. It is recommended for individuals who have previously
   worked on the Colab and are comfortable with the Jupyter notebook
   environment.

@@ -7,7 +7,7 @@ title: Manual Installation, Windows

## **Notebook install (semi-automated)**

We have a
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in this repo as
one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
@@ -42,5 +42,5 @@ dependencies:
  - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
  - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
  - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - -e .

@@ -44,5 +44,5 @@ dependencies:
  - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
  - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
  - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - -e .

@@ -43,5 +43,5 @@ dependencies:
  - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
  - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
  - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - -e .

@@ -59,7 +59,7 @@ dependencies:
  - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
  - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
  - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1

@@ -13,7 +13,6 @@ dependencies:
  - cudatoolkit=11.6
  - pip:
    - albumentations==0.4.3
    - basicsr==1.4.1
    - dependency_injector==4.40.0
    - diffusers==0.6.0
    - einops==0.3.0

@@ -44,5 +43,5 @@ dependencies:
  - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
  - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
  - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
  - -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
  - -e .
@@ -1,7 +1,7 @@
# pip will resolve the version which matches torch
albumentations
dependency_injector==4.40.0
diffusers
diffusers==0.10.*
einops
eventlet
facexlib

@@ -10,6 +10,7 @@ flask_cors==3.0.10
flask_socketio==5.3.0
flaskwebgui==0.3.7
getpass_asterisk
gfpgan==1.3.8
huggingface-hub
imageio
imageio-ffmpeg

@@ -17,6 +18,7 @@ kornia
numpy
omegaconf
opencv-python
picklescan
pillow
pip>=22
pudb

@@ -31,11 +33,8 @@ taming-transformers-rom1504
test-tube>=0.7.5
torch-fidelity
torchmetrics
transformers==4.21.*
picklescan
git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan ; platform_system == 'Windows'
git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan ; platform_system != 'Windows'
git+https://github.com/openai/CLIP.git@main#egg=clip
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
transformers==4.25.*
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.4.zip#egg=pypatchmatch
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
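
These direct archive URLs install like any other pinned requirement. For
example, to install the base file through one of the platform-specific
requirements files referenced elsewhere in this changeset (a sketch):

```Shell
pip install -r environments-and-requirements/requirements-lin-cuda.txt
```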

@@ -1,2 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
-r environments-and-requirements/requirements-base.txt
torch
torchvision
-e .

@@ -1,7 +1,6 @@
-r environments-and-requirements/requirements-base.txt
# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
basicsr==1.4.1
torch==1.12.1
torchvision==0.13.1
-e .

@@ -62,7 +62,7 @@ const PromptInput = () => {
      <Textarea
        id="prompt"
        name="prompt"
        placeholder="I'm dreaming of..."
        placeholder="Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)"
        size={'lg'}
        value={prompt}
        onChange={handleChangePrompt}
installer/create_installer.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash

cd "$(dirname "$0")"

VERSION=$(grep ^VERSION ../setup.py | awk '{ print $3 }' | sed "s/'//g" )

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

echo Building installer zip files for InvokeAI v$VERSION

# get rid of any old ones
rm *.zip

rm -rf InvokeAI-Installer
mkdir InvokeAI-Installer

cp -pr ../environments-and-requirements templates readme.txt InvokeAI-Installer/
mkdir InvokeAI-Installer/templates/rootdir

cp -pr ../configs InvokeAI-Installer/templates/rootdir/

mkdir InvokeAI-Installer/templates/rootdir/{outputs,embeddings,models}

cp install.sh.in InvokeAI-Installer/install.sh
chmod a+rx InvokeAI-Installer/install.sh

zip -r InvokeAI-installer-$VERSION-linux.zip InvokeAI-Installer
zip -r InvokeAI-installer-$VERSION-mac.zip InvokeAI-Installer

# now do the windows installer
rm InvokeAI-Installer/install.sh
cp install.bat.in InvokeAI-Installer/install.bat
cp WinLongPathsEnabled.reg InvokeAI-Installer/

# this gets rid of the "-e ." at the end of the windows requirements file
# because it is easier to do it now than in the .bat install script
egrep -v '^-e .' InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt >requirements.txt
mv requirements.txt InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt
zip -r InvokeAI-installer-$VERSION-windows.zip InvokeAI-Installer

# clean up
rm -rf InvokeAI-Installer

exit 0

installer/install.bat.in (new file, 215 lines)
@@ -0,0 +1,215 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

@rem This script requires the user to install Python 3.9 or higher. All other
@rem requirements are downloaded as needed.

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
    set "no_cache_dir="
)

@rem Config
@rem this should be changed to the tagged release!
@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/2.2.4-rc1.zip
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
set MINIMUM_PYTHON_VERSION=3.9.0
set PYTHON_URL=https://www.python.org/downloads/release/python-3109/

set err_msg=An error has occurred and the script could not continue.

@rem --------------------------- Intro -------------------------------
echo This script will install InvokeAI and its dependencies. Before you start,
echo please make sure to do the following:
echo 1. Install python 3.9 or higher.
echo 2. Double-click on the file WinLongPathsEnabled.reg in order to
echo    enable long path support on your system.
echo 3. Some users have found they need to install the Visual C++ core
echo    libraries or else they experience DLL loading problems at the end of the install.
echo    Visual C++ is very likely already installed on your system, but if you get DLL
echo    issues, please download and install the libraries by going to:
echo    https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
echo.
echo See %INSTRUCTIONS% for more details.
echo.
pause

@rem ---------------------------- check Python version ---------------
echo ***** Checking and Updating Python *****

call python --version >.tmp1 2>.tmp2
if %errorlevel% == 1 (
    set err_msg=Please install Python 3.9 or higher. See %INSTRUCTIONS% for details.
    goto err_exit
)

for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i
if "%python_version%" == "" (
    set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL%
    goto err_exit
)

call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version%
if %errorlevel% == 1 (
    set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL%
    goto err_exit
)

@rem Cleanup
del /q .tmp1 .tmp2

echo Updating PIP...
call python -m pip install --no-warn-script-location -q --upgrade pip

@rem --------------------- Get the requirements file ------------
echo.
echo Setting up requirements file for your system.
copy /y environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt

@rem --------------------- Get the root directory for installation ------------
set rootdir=""
set response=""
set selection=""
:pick_rootdir
if %rootdir% neq "" goto :done
set /p selection=Select the path to install InvokeAI's directory into [%UserProfile%]:
if %selection% == "" set selection=%UserProfile%
set dest=%selection%\invokeai
if exist %dest% (
    set response=y
    set /p response=The directory %dest% exists. Do you wish to resume install from a previous attempt? [Y/n]:
    if !response! == "" set response=y
    if /I !response! == y (set rootdir=%dest%) else (goto :pick_rootdir)
) else (
    set rootdir=!dest!
)
set response=y
set /p response="You have chosen to install InvokeAI into %rootdir%. OK? [Y/n]: "
if !response! == "" set response=y
if /I !response! neq y set rootdir=""
goto :pick_rootdir
:done

@rem ---------------------- Initialize the runtime directory ---------------------
echo.
echo *** Creating Runtime Directory %rootdir% ***
if not exist %rootdir% mkdir %rootdir%
@rem for unknown reasons the mkdir works but returns an error code
if not exist %rootdir% (
    set err_msg=Could not create the directory %rootdir%. Please check the directory's permissions and try again.
    goto :err_exit
)
echo Successful.

@rem --------------------------- Create and populate .venv ---------------------------
echo.
echo ** Creating Virtual Environment for InvokeAI **
call python -mvenv %rootdir%\.venv
if %errorlevel% neq 0 (
    set err_msg=Could not create virtual environment %rootdir%\.venv. Please check the directory's permissions and try again.
    goto :err_exit
)
echo Successful.

echo.
echo *** Installing InvokeAI Requirements ***
call %rootdir%\.venv\Scripts\activate.bat
copy environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt
call python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
    set err_msg=Installation of requirements failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
echo Installation successful.

echo.
echo *** Installing InvokeAI Modules and Executables ***
call python -mpip install %INVOKE_AI_SRC%
if %errorlevel% neq 0 (
    set err_msg=Installation of InvokeAI failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
echo Installation successful.

@rem --------------------------- Set up the root directory ---------------------------
xcopy /E /Y .\templates\rootdir %rootdir%
PUSHD "%rootdir%"
call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root="%rootdir%"
if %errorlevel% neq 0 (
    set err_msg=Configuration failed. See above for error messages and check %TROUBLESHOOTING% for potential solutions.
    goto :err_exit
)
POPD
copy .\templates\invoke.bat.in %rootdir%\invoke.bat
copy .\templates\update.bat.in %rootdir%\update.bat

@rem so that update.bat works
mkdir %rootdir%\environments-and-requirements
xcopy /I /Y .\environments-and-requirements %rootdir%\environments-and-requirements
copy .\requirements.txt %rootdir%\requirements.txt

echo.
echo ***** Finished configuration *****
echo All done. Execute the file %rootdir%\invoke.bat to start InvokeAI.
pause
deactivate
exit

@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers
@rem found at https://stackoverflow.com/questions/15807762/compare-version-numbers-in-batch-file
:compareVersions
::
:: Compares two version numbers and returns the result in the ERRORLEVEL
::
:: Returns 1 if version1 > version2
::         0 if version1 = version2
::        -1 if version1 < version2
::
:: The nodes must be delimited by . or , or -
::
:: Nodes are normally strictly numeric, without a 0 prefix. A letter suffix
:: is treated as a separate node
::
setlocal enableDelayedExpansion
set "v1=%~1"
set "v2=%~2"
call :divideLetters v1
call :divideLetters v2
:loop
call :parseNode "%v1%" n1 v1
call :parseNode "%v2%" n2 v2
if %n1% gtr %n2% exit /b 1
if %n1% lss %n2% exit /b -1
if not defined v1 if not defined v2 exit /b 0
if not defined v1 exit /b -1
if not defined v2 exit /b 1
goto :loop

:parseNode  version  nodeVar  remainderVar
for /f "tokens=1* delims=.,-" %%A in ("%~1") do (
    set "%~2=%%A"
    set "%~3=%%B"
)
exit /b

:divideLetters  versionVar
for %%C in (a b c d e f g h i j k l m n o p q r s t u v w x y z) do set "%~1=!%~1:%%C=.%%C!"
exit /b

:err_exit
echo %err_msg%
echo The installer will exit now.
pause
exit /b

installer/install.sh.in (new file, 216 lines)
@@ -0,0 +1,216 @@
#!/usr/bin/env bash

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

# make sure we are not already in a venv
# (don't need to check status)
deactivate >/dev/null 2>&1

# this should be changed to the tagged release!
INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/2.2.4-rc1.zip
INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
MINIMUM_PYTHON_VERSION=3.9.0

set -euo pipefail
IFS=$'\n\t'

function _err_exit {
    if test "$1" -ne 0
    then
        echo -e "Error code $1; Error caught was '$2'"
        if [ "$OS_NAME" == "osx" ]; then
            echo "Something went wrong while installing InvokeAI and/or its requirements."
            echo "You may need to use the Xcode command line tools to proceed. See step number 3 of"
            echo "https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#walk_through for"
            echo "installation instructions and then run this script again."
        else
            echo "Something went wrong while installing InvokeAI and/or its requirements."
            echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
            echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
            echo "installation methods"
        fi
        read -p "Press any key to exit..."
        exit
    fi
}
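
# readinput wraps the `read` builtin: it first probes whether this shell's
# `read` supports the -i (pre-filled default text) flag, and silently drops
# -i when it does not (e.g. the bash 3.2 that ships with macOS)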
function readinput() {
    local CLEAN_ARGS=""
    while [[ $# -gt 0 ]]; do
        local i="$1"
        case "$i" in
            "-i")
                if read -i "default" 2>/dev/null <<< "test"; then
                    CLEAN_ARGS="$CLEAN_ARGS -i \"$2\""
                fi
                shift
                shift
                ;;
            "-p")
                CLEAN_ARGS="$CLEAN_ARGS -p \"$2\""
                shift
                shift
                ;;
            *)
                CLEAN_ARGS="$CLEAN_ARGS $1"
                shift
                ;;
        esac
    done
    eval read $CLEAN_ARGS
}
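
# version normalizes a dotted version string into a single comparable integer,
# e.g. "3.10.9" becomes 3010009000, so a plain numeric -ge test orders versions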
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

echo "InvokeAI simple installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
read -n 1 -s -r -p "<Press any key to start the install>"
echo ""

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*) OS_NAME="linux";;
    Darwin*) OS_NAME="osx";;
    *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) OS_ARCH="64";;
    arm64*) OS_ARCH="arm64";;
    *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac

echo "Installing for $OS_NAME-$OS_ARCH"
# confirm that python is installed and is up to date

PYTHON=""
for candidate in python3.10 python3.9 python3 python python3.11 ; do
    if ppath=`which $candidate`; then
        python_version=$($ppath -V | awk '{ print $2 }')
        if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
            PYTHON=$ppath
            echo Python $python_version found at $PYTHON
            break
        fi
    fi
done

if [ -z "$PYTHON" ]; then
    echo "A suitable Python interpreter could not be found"
    echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
    read -p "Press any key to exit"
    exit -1
fi

if [ "$OS_NAME" == "osx" ]; then
    xcode_path=$(xcode-select --print-path)
    _err_exit $? "xcode_path command not found"
    export CPPFLAGS="-I$xcode_path/Library/Frameworks/Python3.framework/Versions/Current/Headers"
    echo "Will compile wheels with CPPFLAGS=$CPPFLAGS"
fi

ROOTDIR=""
while [ "$ROOTDIR" == "" ]
do
    echo
    readinput -e -p "Select your preferred location for the 'invokeai' directory [$HOME]: " -i $HOME input
    ROOTDIR=${input:=$HOME}/invokeai
    read -e -p "InvokeAI will be installed into $ROOTDIR. OK? [y]: " input
    RESPONSE=${input:='y'}
    if [ "$RESPONSE" == 'y' ]; then
        if [ -e $ROOTDIR ]; then
            echo
            read -e -p "Directory $ROOTDIR already exists. Do you want to resume an interrupted install? [y]: " input
            RESPONSE=${input:='y'}
            if [ "$RESPONSE" != 'y' ]; then
                ROOTDIR=""
            fi
        else
            mkdir -p $ROOTDIR
            if [ $? -ne 0 ]; then
                echo "Could not create $ROOTDIR. Try again with a different install location."
                ROOTDIR=""
            fi
        fi
    else
        ROOTDIR=""
    fi
done

#--------------------------------------------------------------------------------
echo
echo "** Creating Virtual Environment for InvokeAI **"

$PYTHON -mpip install --upgrade pip
$PYTHON -mvenv $ROOTDIR/.venv
_err_exit $? "Python failed to create virtual environment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."

#--------------------------------------------------------------------------------
echo
echo "** Activating Virtual Environment for InvokeAI **"

source $ROOTDIR/.venv/bin/activate
_err_exit $? "Failed to activate virtual environment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."

PYTHON=$ROOTDIR/.venv/bin/python

#--------------------------------------------------------------------------------
echo
echo "*** Installing InvokeAI Dependencies ***"

if [ "$OS_NAME" == "osx" ]; then
    echo "macOS detected. Installing MPS and CPU support."
    egrep -v '^-e .' environments-and-requirements/requirements-mac-mps-cpu.txt >requirements.txt
else
    if (lsmod | grep amdgpu) &>/dev/null ; then
        echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
        egrep -v '^-e .' environments-and-requirements/requirements-lin-amd.txt >requirements.txt
    else
        echo "Linux system detected. Installing CUDA and CPU support."
        egrep -v '^-e .' environments-and-requirements/requirements-lin-cuda.txt >requirements.txt
    fi
fi

$PYTHON -mpip install -r requirements.txt
_err_exit $? "Failed to install InvokeAI's dependencies."

#--------------------------------------------------------------------------------
echo
echo "*** Installing InvokeAI Modules and Executables ***"
$PYTHON -mpip install $INVOKE_AI_SRC
_err_exit $? "Installation of InvokeAI failed."

#--------------------------------------------------------------------------------
echo " *** Setting Up Root Directory $ROOTDIR *** "
cp -pr templates/rootdir/* $ROOTDIR/
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
chmod a+rx $ROOTDIR/invoke.sh
cp templates/update.sh.in $ROOTDIR/update.sh
chmod a+rx $ROOTDIR/update.sh

# This allows the updater to work!
cp -pr environments-and-requirements requirements.txt $ROOTDIR/

#--------------------------------------------------------------------------------
echo
echo "*** Configuring InvokeAI ***"
pushd $ROOTDIR
./.venv/bin/configure_invokeai.py --root=$ROOTDIR
_err_exit $? "Initial configuration failed. Please see above error messages and $TROUBLESHOOTING for help."

#--------------------------------------------------------------------------------
popd
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
chmod a+rx $ROOTDIR/invoke.sh

cp templates/update.sh.in $ROOTDIR/update.sh
chmod a+rx $ROOTDIR/update.sh

echo "You may now run InvokeAI by entering the directory $ROOTDIR and running invoke.sh"

installer/readme.txt (new file, 52 lines)
@@ -0,0 +1,52 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Preparations:

You will need to install Python 3.9 or higher for this installer
to work. Instructions are given here:
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/

Before you start the installer, please open up your system's command
line window (Terminal or Command) and type the command:

    python --version

If all is well, it will print "Python 3.X.X", where the version number
is at least 3.9.1

If this works, check the version of the Python package manager, pip:

    pip --version

You should get a message that indicates that the pip package
installer was derived from Python 3.9 or 3.10. For example:
"pip 22.3.1 from /usr/bin/pip (python 3.9)"

Long Paths on Windows:

If you are on Windows, you will need to enable Windows Long Paths to
run InvokeAI successfully. If you're not sure what this is, you
almost certainly need to do this.

Simply double-click the "WinLongPathsEnabled.reg" file located in
this directory, and approve the Windows warnings. Note that you will
need to have admin privileges in order to do this.

Launching the installer:

Windows: double-click the 'install.bat' file (while keeping it inside
the InvokeAI-Installer folder).

Linux and Mac: Please open the terminal application and run
'./install.sh' (while keeping it inside the InvokeAI-Installer
folder).

The installer will create a directory named "invokeai" in the folder
of your choice. This directory contains everything you need to run
invokeai. Once InvokeAI is up and running, you may delete the
InvokeAI-Installer folder at your convenience.

For more information, please see
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/

installer/templates/invoke.bat.in (new file, 37 lines)
@@ -0,0 +1,37 @@
@echo off

PUSHD "%~dp0"
setlocal

call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo 3. open the developer console
set /P restore="Please enter 1, 2 or 3: "
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line..
    python .venv\Scripts\invoke.py %*
) ELSE IF /I "%restore%" == "2" (
    echo Starting the InvokeAI browser-based UI..
    python .venv\Scripts\invoke.py --web %*
) ELSE IF /I "%restore%" == "3" (
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE (
    echo Invalid selection
    pause
    exit /b
)
endlocal

source_installer/invoke.sh.in → installer/templates/invoke.sh.in (Executable file → Normal file, 24 lines)
@@ -1,20 +1,20 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"
set -eu

INSTALL_ENV_DIR="$(pwd)/installer_files/env"
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
. .venv/bin/activate

conda activate invokeai
export INVOKEAI_ROOT="$scriptdir"

# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

if [ "$0" != "bash" ]; then
    echo "Do you want to generate images using the"
    echo "1. command-line"

@@ -22,8 +22,8 @@ if [ "$0" != "bash" ]; then
    echo "3. open the developer console"
    read -p "Please enter 1, 2, or 3: " yn
    case $yn in
        1 ) printf "\nStarting the InvokeAI command-line..\n"; python scripts/invoke.py;;
        2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; python scripts/invoke.py --web;;
        1 ) printf "\nStarting the InvokeAI command-line..\n"; .venv/bin/python .venv/bin/invoke.py $*;;
        2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; .venv/bin/python .venv/bin/invoke.py --web $*;;
        3 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
        * ) echo "Invalid selection"; exit;;
    esac
installer/templates/update.bat.in (new file, 52 lines)
@@ -0,0 +1,52 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

PUSHD "%~dp0"

set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
set arg=%1
if "%arg%" neq "" (
    if "%arg:~0,4%" neq "http" (
        echo Usage: update.bat ^<release URL^>.zip
        echo Updates InvokeAI to use the indicated version of the code base.
        echo Find the zip file for the release you want, and pass it as the argument.
        echo For example update.bat https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
        echo.
        echo If no argument is provided, the most recent development version will be installed, equivalent to
        echo update.bat https://github.com/invoke-ai/InvokeAI/archive/main.zip
        exit /b
    ) else (
        set INVOKE_AI_SRC=%arg%
    )
)

call .venv\Scripts\activate.bat

echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
echo If you do not want to do this, press control-C now!
pause

call pip install -r requirements.txt
if %errorlevel% neq 0 (
    echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

call pip install !INVOKE_AI_SRC!
if %errorlevel% neq 0 (
    echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root="%rootdir%"

if %errorlevel% neq 0 (
    echo Configuration of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
    exit /b
)

echo "Press any key to continue"
pause
endlocal
|
||||
|
||||
52
installer/templates/update.sh.in
Normal file
52
installer/templates/update.sh.in
Normal file
@@ -0,0 +1,52 @@
|
||||
#!/bin/bash

set -eu

if [ $# -ge 1 ] && [ "${1:0:4}" != "http" ]; then
    echo "Usage: update.sh <release URL>.zip"
    echo "Updates InvokeAI to use the indicated version of the code base."
    echo "Find the zip file for the release you want, and pass it as the argument."
    echo "For example update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.3.zip"
    echo ""
    echo "If no argument is provided, the most recent development version will be installed, equivalent to"
    echo "update.sh https://github.com/invoke-ai/InvokeAI/archive/main.zip"
    exit -1
fi

INVOKE_AI_SRC=${1:-https://github.com/invoke-ai/InvokeAI/archive/main.zip}

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

function _err_exit {
    if test "$1" -ne 0
    then
        echo "Something went wrong while installing InvokeAI and/or its requirements."
        echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}

echo This script will update InvokeAI and all its dependencies from $INVOKE_AI_SRC.
echo If you do not want to do this, press control-C now!
read -p "Press any key to continue, or CTRL-C to exit..."

. .venv/bin/activate

pip install -r requirements.txt
_err_exit $? "The pip program failed to install InvokeAI's requirements."

pip install $INVOKE_AI_SRC
_err_exit $? "The pip program failed to install InvokeAI."

python .venv/bin/configure_invokeai.py
_err_exit $? "The configure script failed to run successfully."
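For readers who prefer driving the same update flow from Python (for example in CI), a minimal sketch that mirrors update.sh is shown below; the default URL and error-reporting text are taken from the script, everything else is illustrative:

import subprocess, sys

INVOKE_AI_SRC = sys.argv[1] if len(sys.argv) > 1 else \
    'https://github.com/invoke-ai/InvokeAI/archive/main.zip'

def run_or_die(cmd, msg):
    # mirrors _err_exit: stop and point at the issue tracker on failure
    if subprocess.call(cmd) != 0:
        sys.exit(msg + ' Please report this error to https://github.com/invoke-ai/InvokeAI/issues')

run_or_die(['pip', 'install', '-r', 'requirements.txt'],
           "The pip program failed to install InvokeAI's requirements.")
run_or_die(['pip', 'install', INVOKE_AI_SRC],
           'The pip program failed to install InvokeAI.')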
@@ -20,6 +20,8 @@ import cv2
import skimage

from omegaconf import OmegaConf

import ldm.invoke.conditioning
from ldm.invoke.generator.base import downsampling
from PIL import Image, ImageOps
from torch import nn
@@ -40,7 +42,7 @@ from ldm.invoke.model_cache import ModelCache
from ldm.invoke.seamless import configure_model_padding
from ldm.invoke.txt2mask import Txt2Mask, SegmentedGrayscale
from ldm.invoke.concepts_lib import Concepts


def fix_func(orig):
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        def new_func(*args, **kw):
@@ -129,7 +131,6 @@ gr = Generate(

"""


class Generate:
    """Generate class
    Stores default values for multiple configuration items
@@ -235,7 +236,7 @@ class Generate:
        except Exception:
            print('** An error was encountered while installing the safety checker:')
            print(traceback.format_exc())

    def prompt2png(self, prompt, outdir, **kwargs):
        """
        Takes a prompt and an output directory, writes out the requested number
@@ -329,7 +330,7 @@ class Generate:
            infill_method = infill_methods[0], # The infill method to use
            force_outpaint: bool = False,
            enable_image_debugging = False,

            **args,
    ):  # eat up additional cruft
        """
@@ -372,7 +373,7 @@ class Generate:
        def process_image(image,seed):
            image.save(f'images/{seed}.png')

        The code used to save images to a directory can be found in ldm/invoke/pngwriter.py.
        It contains code to create the requested output directory, select a unique informative
        name for each image, and write the prompt into the PNG metadata.
        """
@@ -455,7 +456,7 @@ class Generate:
        try:
            uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
                prompt, model =self.model,
                skip_normalize=skip_normalize,
                skip_normalize_legacy_blend=skip_normalize,
                log_tokens =self.log_tokenization
            )

@@ -589,7 +590,7 @@ class Generate:
        seed = opt.seed or args.seed
        if seed is None or seed < 0:
            seed = random.randrange(0, np.iinfo(np.uint32).max)

        prompt = opt.prompt or args.prompt or ''
        print(f'>> using seed {seed} and prompt "{prompt}" for {image_path}')

@@ -607,8 +608,8 @@ class Generate:
        # todo: cross-attention control
        uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
            prompt, model =self.model,
            skip_normalize=opt.skip_normalize,
            log_tokens =opt.log_tokenization
            skip_normalize_legacy_blend=opt.skip_normalize,
            log_tokens =ldm.invoke.conditioning.log_tokenization
        )

        if tool in ('gfpgan','codeformer','upscale'):
@@ -641,7 +642,7 @@ class Generate:

        opt.seed = seed
        opt.prompt = prompt

        if len(extend_instructions) > 0:
            restorer = Outcrop(image,self,)
            return restorer.process (
@@ -683,7 +684,7 @@ class Generate:
                image_callback = callback,
                prefix = prefix
            )

        elif tool is None:
            print(f'* please provide at least one postprocessing option, such as -G or -U')
            return None
@@ -706,13 +707,13 @@ class Generate:

        if embiggen is not None:
            return self._make_embiggen()

        if inpainting_model_in_use:
            return self._make_omnibus()

        if ((init_image is not None) and (mask_image is not None)) or force_outpaint:
            return self._make_inpaint()

        if init_image is not None:
            return self._make_img2img()

@@ -743,7 +744,7 @@ class Generate:
        if self._has_transparency(image):
            self._transparency_check_and_warning(image, mask, force_outpaint)
            init_mask = self._create_init_mask(image, width, height, fit=fit)

        if (image.width * image.height) > (self.width * self.height) and self.size_matters:
            print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.")
            self.size_matters = False
@@ -757,9 +758,9 @@ class Generate:
        elif text_mask:
            init_mask = self._txt2mask(image, text_mask, width, height, fit=fit)

        if invert_mask:
        if init_mask and invert_mask:
            init_mask = ImageOps.invert(init_mask)

        return init_image,init_mask

    # lots o' repeated code here! Turn into a make_func()
@@ -818,7 +819,7 @@ class Generate:
        self.set_model(self.model_name)

    def set_model(self,model_name):
        """
        Given the name of a model defined in models.yaml, will load and initialize it
        and return the model object. Previously-used models will be cached.
        """
@@ -830,7 +831,7 @@ class Generate:
        if not cache.valid_model(model_name):
            print(f'** "{model_name}" is not a known model name. Please check your models.yaml file')
            return self.model

        cache.print_vram_usage()

        # have to get rid of all references to model in order
@@ -839,7 +840,7 @@ class Generate:
        self.sampler = None
        self.generators = {}
        gc.collect()

        model_data = cache.get_model(model_name)
        if model_data is None:  # restore previous
            model_data = cache.get_model(self.model_name)
@@ -852,7 +853,7 @@ class Generate:

        # uncache generators so they pick up new models
        self.generators = {}

        seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
        if self.embedding_path is not None:
            self.model.embedding_manager.load(
@@ -901,7 +902,7 @@ class Generate:
            image_callback = None,
            prefix = None,
    ):

        for r in image_list:
            image, seed = r
            try:
@@ -911,7 +912,7 @@ class Generate:
                if self.gfpgan is None:
                    print('>> GFPGAN not found. Face restoration is disabled.')
                else:
                    image = self.gfpgan.process(image, strength, seed)
                if facetool == 'codeformer':
                    if self.codeformer is None:
                        print('>> CodeFormer not found. Face restoration is disabled.')
@@ -8,6 +8,7 @@ import time
import traceback
import yaml

from ldm.generate import Generate
from ldm.invoke.globals import Globals
from ldm.invoke.prompt_parser import PromptParser
from ldm.invoke.readline import get_completer, Completer
@@ -27,7 +28,7 @@ def main():
    """Initialize command-line parsers and the diffusion model"""
    global infile
    print('* Initializing, be patient...')

    opt = Args()
    args = opt.parse_args()
    if not args:
@@ -45,9 +46,8 @@ def main():
        args.max_loaded_models = 1

    # alert - setting globals here
    Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
    Globals.try_patchmatch = args.patchmatch

    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # loading here to avoid long delays on startup
@@ -68,6 +68,8 @@ def main():
    if opt.embeddings:
        if not os.path.isabs(opt.embedding_path):
            embedding_path = os.path.normpath(os.path.join(Globals.root,opt.embedding_path))
        else:
            embedding_path = opt.embedding_path
    else:
        embedding_path = None

@@ -279,7 +281,7 @@ def main_loop(gen, opt):
        prefix = file_writer.unique_prefix()
        step_callback = make_step_callback(gen, opt, prefix) if opt.save_intermediates > 0 else None

        def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None):
        def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None, prompt_in=None, attention_maps_image=None):
            # note the seed is the seed of the current image
            # the first_seed is the original seed that noise is added to
            # when the -v switch is used to generate variations
@@ -308,7 +310,7 @@ def main_loop(gen, opt):
            if use_prefix is not None:
                prefix = use_prefix
            postprocessed = upscaled if upscaled else operation=='postprocess'
            opt.prompt = gen.concept_lib().replace_triggers_with_concepts(opt.prompt) # to avoid the problem of non-unique concept triggers
            opt.prompt = gen.concept_lib().replace_triggers_with_concepts(opt.prompt or prompt_in) # to avoid the problem of non-unique concept triggers
            filename, formatted_dream_prompt = prepare_image_metadata(
                opt,
                prefix,
@@ -339,8 +341,8 @@ def main_loop(gen, opt):
                    filename,
                    tool,
                    formatted_dream_prompt,
                )

                )

            if (not postprocessed) or opt.save_original:
                # only append to results if we didn't overwrite an earlier output
                results.append([path, formatted_dream_prompt])
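Because image_writer is now invoked with two extra keyword arguments, any externally supplied callback has to accept them as well. A defensive signature for a third-party callback might look like the following sketch (the **_future_kwargs catch-all is an assumption, not part of the project's API contract):

def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None,
                 prompt_in=None, attention_maps_image=None, **_future_kwargs):
    # tolerate keyword additions the same way this release added
    # prompt_in and attention_maps_image
    pass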
@@ -430,7 +432,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
        add_embedding_terms(gen, completer)
        completer.add_history(command)
        operation = None

    elif command.startswith('!models'):
        gen.model_cache.print_models()
        completer.add_history(command)
@@ -531,7 +533,7 @@ def add_weights_to_config(model_path:str, gen, opt, completer):

    completer.complete_extensions(('.yaml','.yml'))
    completer.linebuffer = 'configs/stable-diffusion/v1-inference.yaml'

    done = False
    while not done:
        new_config['config'] = input('Configuration file for this model: ')
@@ -562,7 +564,7 @@ def add_weights_to_config(model_path:str, gen, opt, completer):
            print('** Please enter a valid integer between 64 and 2048')

    make_default = input('Make this the default model? [n] ') in ('y','Y')

    if write_config_file(opt.conf, gen, model_name, new_config, make_default=make_default):
        completer.add_model(model_name)

@@ -575,14 +577,14 @@ def del_config(model_name:str, gen, opt, completer):
    gen.model_cache.commit(opt.conf)
    print(f'** {model_name} deleted')
    completer.del_model(model_name)

def edit_config(model_name:str, gen, opt, completer):
    config = gen.model_cache.config

    if model_name not in config:
        print(f'** Unknown model {model_name}')
        return

    print(f'\n>> Editing model {model_name} from configuration file {opt.conf}')

    conf = config[model_name]
@@ -595,10 +597,10 @@ def edit_config(model_name:str, gen, opt, completer):
    make_default = input('Make this the default model? [n] ') in ('y','Y')
    completer.complete_extensions(None)
    write_config_file(opt.conf, gen, model_name, new_config, clobber=True, make_default=make_default)

def write_config_file(conf_path, gen, model_name, new_config, clobber=False, make_default=False):
    current_model = gen.model_name

    op = 'modify' if clobber else 'import'
    print('\n>> New configuration:')
    if make_default:
@@ -621,7 +623,7 @@ def write_config_file(conf_path, gen, model_name, new_config, clobber=False, mak
        gen.model_cache.set_default_model(model_name)

    gen.model_cache.commit(conf_path)

    do_switch = input(f'Keep model loaded? [y]')
    if len(do_switch)==0 or do_switch[0] in ('y','Y'):
        pass
@@ -651,7 +653,7 @@ def do_postprocess (gen, opt, callback):
        opt.prompt = opt.new_prompt
    else:
        opt.prompt = None

    if os.path.dirname(file_path) == '': #basename given
        file_path = os.path.join(opt.outdir,file_path)

@@ -716,7 +718,7 @@ def add_postprocessing_to_metadata(opt,original_file,new_file,tool,command):
    )
    meta['image']['postprocessing'] = pp
    write_metadata(new_file,meta)

def prepare_image_metadata(
    opt,
    prefix,
@@ -787,28 +789,28 @@ def get_next_command(infile=None) -> str:  # command string
        print(f'#{command}')
    return command

def invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan):
def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan):
    print('\n* --web was specified, starting web server...')
    from backend.invoke_ai_web_server import InvokeAIWebServer
    # Change working directory to the stable-diffusion directory
    os.chdir(
        os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    )

    invoke_ai_web_server = InvokeAIWebServer(generate=gen, gfpgan=gfpgan, codeformer=codeformer, esrgan=esrgan)

    try:
        invoke_ai_web_server.run()
    except KeyboardInterrupt:
        pass

def add_embedding_terms(gen,completer):
    '''
    Called after setting the model, updates the autocompleter with
    any terms loaded by the embedding manager.
    '''
    completer.add_embedding_terms(gen.model.embedding_manager.list_terms())

def split_variations(variations_string) -> list:
    # shotgun parsing, woo
    parts = []
@@ -865,7 +867,7 @@ def make_step_callback(gen, opt, prefix):
        image = gen.sample_to_image(img)
        image.save(filename,'PNG')
    return callback

def retrieve_dream_command(opt,command,completer):
    '''
    Given a full or partial path to a previously-generated image file,
@@ -873,7 +875,7 @@ def retrieve_dream_command(opt,command,completer):
    and pop it into the readline buffer (linux, Mac), or print out a comment
    for cut-and-paste (windows)

    Given a wildcard path to a folder with image png files,
    will retrieve and format the dream command used to generate the images,
    and save them to a file commands.txt for further processing
    '''
@@ -909,7 +911,7 @@ def write_commands(opt, file_path:str, outfilepath:str):
    except ValueError:
        print(f'## "{basename}": unacceptable pattern')
        return

    commands = []
    cmd = None
    for path in paths:
@@ -938,7 +940,7 @@ def emergency_model_reconfigure():
    print('  After reconfiguration is done, please relaunch invoke.py. ')
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    print('configure_invokeai is launching....\n')

    sys.argv = ['configure_invokeai','--interactive']
    import configure_invokeai
    configure_invokeai.main()

@@ -0,0 +1 @@
__version__='2.2.4'
@@ -119,7 +119,7 @@ PRECISION_CHOICES = [

# is there a way to pick this up during git commits?
APP_ID = 'invoke-ai/InvokeAI'
APP_VERSION = 'v2.2.0'
APP_VERSION = 'v2.2.4'

class ArgFormatter(argparse.RawTextHelpFormatter):
    # use defined argument order to display usage
@@ -172,14 +172,20 @@ class Args(object):
        '''Parse the shell switches and store.'''
        try:
            sysargs = sys.argv[1:]
            initfile = os.path.expanduser(Globals.initfile)
            # pre-parse to get the root directory; ignore the rest
            switches = self._arg_parser.parse_args(sysargs)
            Globals.root = switches.root_dir or Globals.root

            # now use root directory to find the init file
            initfile = os.path.expanduser(os.path.join(Globals.root,Globals.initfile))
            legacyinit = os.path.expanduser('~/.invokeai')
            if os.path.exists(initfile):
                print(f'>> Initialization file {initfile} found. Loading...')
                sysargs.insert(0,f'@{initfile}')
            else:
                from ldm.invoke.CLI import emergency_model_reconfigure
                emergency_model_reconfigure()
                sys.exit(-1)
            elif os.path.exists(legacyinit):
                print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
                sysargs.insert(0,f'@{legacyinit}')

            self._arg_switches = self._arg_parser.parse_args(sysargs)
            return self._arg_switches
        except Exception as e:
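The sysargs.insert(0, f'@{initfile}') trick relies on argparse's from-file support: when the parser is constructed with fromfile_prefix_chars='@' (an assumption based on this usage; the parser construction is not shown in the hunk), an @path argument is expanded into the options listed in that file. A minimal standalone illustration:

import argparse, pathlib

# write an init file with one option per line, as argparse expects
pathlib.Path('init.txt').write_text('--steps=20\n')

parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--steps', type=int, default=10)
print(parser.parse_args(['@init.txt']).steps)   # prints 20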
@@ -411,7 +417,7 @@ class Args(object):
        model_group.add_argument(
            '--root_dir',
            default=None,
            help='Path to directory containing "models", "outputs" and "configs". If not present will try to read from ~/.invokeai and then from environment variable INVOKEAI_ROOT. Defaults to the current directory as a last resort.',
            help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.',
        )
        model_group.add_argument(
            '--config',
@@ -7,20 +7,46 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an

'''
import re
from difflib import SequenceMatcher
from typing import Union

import torch

from .prompt_parser import PromptParser, Blend, FlattenedPrompt, \
    CrossAttentionControlledFragment, CrossAttentionControlSubstitute, Fragment, log_tokenization
    CrossAttentionControlledFragment, CrossAttentionControlSubstitute, Fragment
from ..models.diffusion import cross_attention_control
from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder


def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_normalize=False):
def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
    prompt, negative_prompt = get_prompt_structure(prompt_string,
                                                   skip_normalize_legacy_blend=skip_normalize_legacy_blend)
    conditioning = _get_conditioning_for_prompt(prompt, negative_prompt, model, log_tokens)

    return conditioning


def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = False) -> (
        Union[FlattenedPrompt, Blend], FlattenedPrompt):
    """
    parse the passed-in prompt string and return tuple (positive_prompt, negative_prompt)
    """
    prompt, negative_prompt = _parse_prompt_string(prompt_string,
                                                   skip_normalize_legacy_blend=skip_normalize_legacy_blend)
    return prompt, negative_prompt
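A hedged usage sketch of the refactored entry points (the [..] negative-prompt syntax comes from the parser below; model is assumed to be a loaded Stable Diffusion model object):

# positive/negative structure only, no model required
positive, negative = get_prompt_structure('a photo of a cat [blurry, low quality]')

# full conditioning for sampling: (unconditioning, conditioning, extra_info)
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
    'a photo of a cat [blurry, low quality]', model)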
def get_tokens_for_prompt(model, parsed_prompt: FlattenedPrompt) -> [str]:
    text_fragments = [x.text if type(x) is Fragment else
                      (" ".join([f.text for f in x.original]) if type(x) is CrossAttentionControlSubstitute else
                       str(x))
                      for x in parsed_prompt.children]
    text = " ".join(text_fragments)
    tokens = model.cond_stage_model.tokenizer.tokenize(text)
    return tokens


def _parse_prompt_string(prompt_string_uncleaned, skip_normalize_legacy_blend=False) -> Union[FlattenedPrompt, Blend]:
    # Extract Unconditioned Words From Prompt
    unconditioned_words = ''
    unconditional_regex = r'\[(.*?)\]'
@@ -39,7 +65,7 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n
    pp = PromptParser()

    parsed_prompt: Union[FlattenedPrompt, Blend] = None
    legacy_blend: Blend = pp.parse_legacy_blend(prompt_string_cleaned)
    legacy_blend: Blend = pp.parse_legacy_blend(prompt_string_cleaned, skip_normalize_legacy_blend)
    if legacy_blend is not None:
        parsed_prompt = legacy_blend
    else:
@@ -47,118 +73,150 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n
        parsed_prompt = pp.parse_conjunction(prompt_string_cleaned).prompts[0]

    parsed_negative_prompt: FlattenedPrompt = pp.parse_conjunction(unconditioned_words).prompts[0]
    return parsed_prompt, parsed_negative_prompt


def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], parsed_negative_prompt: FlattenedPrompt,
                                 model, log_tokens=False) \
        -> tuple[torch.Tensor, torch.Tensor, InvokeAIDiffuserComponent.ExtraConditioningInfo]:
    """
    Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
    """

    if log_tokens:
        print(f">> Parsed prompt to {parsed_prompt}")
        print(f">> Parsed negative prompt to {parsed_negative_prompt}")

    conditioning = None
    cac_args:cross_attention_control.Arguments = None
    cac_args: cross_attention_control.Arguments = None

    if type(parsed_prompt) is Blend:
        blend: Blend = parsed_prompt
        embeddings_to_blend = None
        for i,flattened_prompt in enumerate(blend.prompts):
            this_embedding, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                 flattened_prompt,
                                                                                 log_tokens=log_tokens,
                                                                                 log_display_label=f"(blend part {i+1}, weight={blend.weights[i]})" )
            embeddings_to_blend = this_embedding if embeddings_to_blend is None else torch.cat(
                (embeddings_to_blend, this_embedding))
        conditioning = WeightedFrozenCLIPEmbedder.apply_embedding_weights(embeddings_to_blend.unsqueeze(0),
                                                                          blend.weights,
                                                                          normalize=blend.normalize_weights)
    else:
        flattened_prompt: FlattenedPrompt = parsed_prompt
        wants_cross_attention_control = type(flattened_prompt) is not Blend \
                                        and any([issubclass(type(x), CrossAttentionControlledFragment) for x in flattened_prompt.children])
        if wants_cross_attention_control:
            original_prompt = FlattenedPrompt()
            edited_prompt = FlattenedPrompt()
            # for name, a0, a1, b0, b1 in edit_opcodes: only name == 'equal' is currently parsed
            original_token_count = 0
            edited_token_count = 0
            edit_opcodes = []
            edit_options = []
            for fragment in flattened_prompt.children:
                if type(fragment) is CrossAttentionControlSubstitute:
                    original_prompt.append(fragment.original)
                    edited_prompt.append(fragment.edited)
        conditioning = _get_conditioning_for_blend(model, parsed_prompt, log_tokens)
    elif type(parsed_prompt) is FlattenedPrompt:
        if parsed_prompt.wants_cross_attention_control:
            conditioning, cac_args = _get_conditioning_for_cross_attention_control(model, parsed_prompt, log_tokens)

                    to_replace_token_count = get_tokens_length(model, fragment.original)
                    replacement_token_count = get_tokens_length(model, fragment.edited)
                    edit_opcodes.append(('replace',
                                         original_token_count, original_token_count + to_replace_token_count,
                                         edited_token_count, edited_token_count + replacement_token_count
                                         ))
                    original_token_count += to_replace_token_count
                    edited_token_count += replacement_token_count
                    edit_options.append(fragment.options)
                #elif type(fragment) is CrossAttentionControlAppend:
                #    edited_prompt.append(fragment.fragment)
                else:
                    # regular fragment
                    original_prompt.append(fragment)
                    edited_prompt.append(fragment)

                    count = get_tokens_length(model, [fragment])
                    edit_opcodes.append(('equal', original_token_count, original_token_count+count, edited_token_count, edited_token_count+count))
                    edit_options.append(None)
                    original_token_count += count
                    edited_token_count += count
            original_embeddings, original_tokens = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                                    original_prompt,
                                                                                                    log_tokens=log_tokens,
                                                                                                    log_display_label="(.swap originals)")
            # naïvely building a single edited_embeddings like this disregards the effects of changing the absolute location of
            # subsequent tokens when there is >1 edit and earlier edits change the total token count.
            # eg "a cat.swap(smiling dog, s_start=0.5) eating a hotdog.swap(pizza)" - when the 'pizza' edit is active but the
            # 'cat' edit is not, the 'pizza' feature vector will nevertheless be affected by the introduction of the extra
            # token 'smiling' in the inactive 'cat' edit.
            # todo: build multiple edited_embeddings, one for each edit, and pass just the edited fragments through to the CrossAttentionControl functions
            edited_embeddings, edited_tokens = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                                                edited_prompt,
                                                                                                log_tokens=log_tokens,
                                                                                                log_display_label="(.swap replacements)")

            conditioning = original_embeddings
            edited_conditioning = edited_embeddings
            #print('>> got edit_opcodes', edit_opcodes, 'options', edit_options)
            cac_args = cross_attention_control.Arguments(
                edited_conditioning = edited_conditioning,
                edit_opcodes = edit_opcodes,
                edit_options = edit_options
            )
        else:
            conditioning, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                               flattened_prompt,
                                                                               log_tokens=log_tokens,
                                                                               log_display_label="(prompt)")
            conditioning, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                                    parsed_prompt,
                                                                    log_tokens=log_tokens,
                                                                    log_display_label="(prompt)")
    else:
        raise ValueError(f"parsed_prompt is '{type(parsed_prompt)}' which is not a supported prompt type")

    unconditioning, _ = build_embeddings_and_tokens_for_flattened_prompt(model,
                                                                         parsed_negative_prompt,
                                                                         log_tokens=log_tokens,
                                                                         log_display_label="(unconditioning)")
    unconditioning, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                              parsed_negative_prompt,
                                                              log_tokens=log_tokens,
                                                              log_display_label="(unconditioning)")
    if isinstance(conditioning, dict):
        # hybrid conditioning is in play
        unconditioning, conditioning = flatten_hybrid_conditioning(unconditioning, conditioning)
        unconditioning, conditioning = _flatten_hybrid_conditioning(unconditioning, conditioning)
        if cac_args is not None:
            print(">> Hybrid conditioning cannot currently be combined with cross attention control. Cross attention control will be ignored.")
            print(
                ">> Hybrid conditioning cannot currently be combined with cross attention control. Cross attention control will be ignored.")
            cac_args = None

    eos_token_index = 1
    if type(parsed_prompt) is not Blend:
        tokens = get_tokens_for_prompt(model, parsed_prompt)
        eos_token_index = len(tokens)+1
    return (
        unconditioning, conditioning, InvokeAIDiffuserComponent.ExtraConditioningInfo(
            tokens_count_including_eos_bos=eos_token_index + 1,
            cross_attention_control_args=cac_args
        )
    )


def build_token_edit_opcodes(original_tokens, edited_tokens):
    original_tokens = original_tokens.cpu().numpy()[0]
    edited_tokens = edited_tokens.cpu().numpy()[0]
def _get_conditioning_for_cross_attention_control(model, prompt: FlattenedPrompt, log_tokens: bool = True):
    original_prompt = FlattenedPrompt()
    edited_prompt = FlattenedPrompt()
    # for name, a0, a1, b0, b1 in edit_opcodes: only name == 'equal' is currently parsed
    original_token_count = 0
    edited_token_count = 0
    edit_options = []
    edit_opcodes = []
    # beginning of sequence
    edit_opcodes.append(
        ('equal', original_token_count, original_token_count + 1, edited_token_count, edited_token_count + 1))
    edit_options.append(None)
    original_token_count += 1
    edited_token_count += 1
    for fragment in prompt.children:
        if type(fragment) is CrossAttentionControlSubstitute:
            original_prompt.append(fragment.original)
            edited_prompt.append(fragment.edited)

    return SequenceMatcher(None, original_tokens, edited_tokens).get_opcodes()
            to_replace_token_count = _get_tokens_length(model, fragment.original)
            replacement_token_count = _get_tokens_length(model, fragment.edited)
            edit_opcodes.append(('replace',
                                 original_token_count, original_token_count + to_replace_token_count,
                                 edited_token_count, edited_token_count + replacement_token_count
                                 ))
            original_token_count += to_replace_token_count
            edited_token_count += replacement_token_count
            edit_options.append(fragment.options)
        # elif type(fragment) is CrossAttentionControlAppend:
        #    edited_prompt.append(fragment.fragment)
        else:
            # regular fragment
            original_prompt.append(fragment)
            edited_prompt.append(fragment)

def build_embeddings_and_tokens_for_flattened_prompt(model, flattened_prompt: FlattenedPrompt, log_tokens: bool=False, log_display_label: str=None):
            count = _get_tokens_length(model, [fragment])
            edit_opcodes.append(('equal', original_token_count, original_token_count + count, edited_token_count,
                                 edited_token_count + count))
            edit_options.append(None)
            original_token_count += count
            edited_token_count += count
    # end of sequence
    edit_opcodes.append(
        ('equal', original_token_count, original_token_count + 1, edited_token_count, edited_token_count + 1))
    edit_options.append(None)
    original_token_count += 1
    edited_token_count += 1
    original_embeddings, original_tokens = _get_embeddings_and_tokens_for_prompt(model,
                                                                                 original_prompt,
                                                                                 log_tokens=log_tokens,
                                                                                 log_display_label="(.swap originals)")
    # naïvely building a single edited_embeddings like this disregards the effects of changing the absolute location of
    # subsequent tokens when there is >1 edit and earlier edits change the total token count.
    # eg "a cat.swap(smiling dog, s_start=0.5) eating a hotdog.swap(pizza)" - when the 'pizza' edit is active but the
    # 'cat' edit is not, the 'pizza' feature vector will nevertheless be affected by the introduction of the extra
    # token 'smiling' in the inactive 'cat' edit.
    # todo: build multiple edited_embeddings, one for each edit, and pass just the edited fragments through to the CrossAttentionControl functions
    edited_embeddings, edited_tokens = _get_embeddings_and_tokens_for_prompt(model,
                                                                             edited_prompt,
                                                                             log_tokens=log_tokens,
                                                                             log_display_label="(.swap replacements)")
    conditioning = original_embeddings
    edited_conditioning = edited_embeddings
    # print('>> got edit_opcodes', edit_opcodes, 'options', edit_options)
    cac_args = cross_attention_control.Arguments(
        edited_conditioning=edited_conditioning,
        edit_opcodes=edit_opcodes,
        edit_options=edit_options
    )
    return conditioning, cac_args
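To make the opcode construction concrete, here is a worked example as comments (all token counts are assumed for illustration; real counts come from the CLIP tokenizer):

# For the prompt "a cat.swap(dog) eating", assuming "a" = 1 token,
# "cat" and "dog" = 1 token each, and "eating" = 1 token, the loop above
# would build:
#   [('equal',   0, 1, 0, 1),   # BOS
#    ('equal',   1, 2, 1, 2),   # "a"
#    ('replace', 2, 3, 2, 3),   # "cat" -> "dog"
#    ('equal',   3, 4, 3, 4),   # "eating"
#    ('equal',   4, 5, 4, 5)]   # EOS
# Each tuple is (name, a0, a1, b0, b1): token span a0:a1 in the original
# prompt corresponds to span b0:b1 in the edited prompt.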
def _get_conditioning_for_blend(model, blend: Blend, log_tokens: bool = False):
    embeddings_to_blend = None
    for i, flattened_prompt in enumerate(blend.prompts):
        this_embedding, _ = _get_embeddings_and_tokens_for_prompt(model,
                                                                  flattened_prompt,
                                                                  log_tokens=log_tokens,
                                                                  log_display_label=f"(blend part {i + 1}, weight={blend.weights[i]})")
        embeddings_to_blend = this_embedding if embeddings_to_blend is None else torch.cat(
            (embeddings_to_blend, this_embedding))
    conditioning = WeightedFrozenCLIPEmbedder.apply_embedding_weights(embeddings_to_blend.unsqueeze(0),
                                                                      blend.weights,
                                                                      normalize=blend.normalize_weights)
    return conditioning


def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedPrompt, log_tokens: bool = False,
                                          log_display_label: str = None):
    if type(flattened_prompt) is not FlattenedPrompt:
        raise Exception(f"embeddings can only be made from FlattenedPrompts, got {type(flattened_prompt)} instead")
    fragments = [x.text for x in flattened_prompt.children]
@@ -170,12 +228,14 @@ def build_embeddings_and_tokens_for_flattened_prompt(model, flattened_prompt: Fl

    return embeddings, tokens

def get_tokens_length(model, fragments: list[Fragment]):

def _get_tokens_length(model, fragments: list[Fragment]):
    fragment_texts = [x.text for x in fragments]
    tokens = model.cond_stage_model.get_tokens(fragment_texts, include_start_and_end_markers=False)
    return sum([len(x) for x in tokens])

def flatten_hybrid_conditioning(uncond, cond):

def _flatten_hybrid_conditioning(uncond, cond):
    '''
    This handles the choice between a conditional conditioning
    that is a tensor (used by cross attention) vs one that has additional
@@ -194,4 +254,29 @@ def flatten_hybrid_conditioning(uncond, cond):
        cond_flattened[k] = torch.cat([uncond[k], cond[k]])
    return uncond, cond_flattened


def log_tokenization(text, model, display_label=None):
    """ shows how the prompt is tokenized
    # usually tokens have '</w>' to indicate end-of-word,
    # but for readability it has been replaced with ' '
    """
    tokens = model.cond_stage_model.tokenizer.tokenize(text)
    tokenized = ""
    discarded = ""
    usedTokens = 0
    totalTokens = len(tokens)
    for i in range(0, totalTokens):
        token = tokens[i].replace('</w>', ' ')
        # alternate color
        s = (usedTokens % 6) + 1
        if i < model.cond_stage_model.max_length:
            tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
            usedTokens += 1
        else:  # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"
    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
        )
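As an aside, the \x1b[0;3{s};40m escape used above cycles each token through the six ANSI foreground colors 31-36 on a black (40) background so adjacent tokens are visually distinguishable; a standalone demonstration of the same trick:

# prints each word of the sentence in a rotating ANSI foreground color
words = "a photo of a cat".split()
out = ""
for n, word in enumerate(words):
    s = (n % 6) + 1          # cycle through colors 31..36
    out += f"\x1b[0;3{s};40m{word} "
print(out + "\x1b[0m")       # reset attributes at the end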
@@ -14,6 +14,7 @@ import cv2 as cv
from einops import rearrange, repeat
from pytorch_lightning import seed_everything
from ldm.invoke.devices import choose_autocast
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ldm.util import rand_perlin_2d

downsampling = 8
@@ -51,9 +52,12 @@ class Generator():
    def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
                 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
                 safety_checker:dict=None,
                 attention_maps_callback = None,
                 **kwargs):
        scope = choose_autocast(self.precision)
        self.safety_checker = safety_checker
        attention_maps_images = []
        attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
        make_image = self.get_make_image(
            prompt,
            sampler = sampler,
@@ -63,6 +67,7 @@ class Generator():
            step_callback = step_callback,
            threshold = threshold,
            perlin = perlin,
            attention_maps_callback = attention_maps_callback,
            **kwargs
        )
        results = []
@@ -98,12 +103,13 @@ class Generator():
                results.append([image, seed])

                if image_callback is not None:
                    image_callback(image, seed, first_seed=first_seed)
                    attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1]
                    image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image)

                seed = self.new_seed()

        return results
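Note that generate() accepts an attention_maps_callback argument but immediately rebinds the name to a local lambda that accumulates stacked attention-map images; the distilled capture pattern (names from the diff; the saver object is assumed to expose get_stacked_maps_image()):

attention_maps_images = []
attention_maps_callback = lambda saver: attention_maps_images.append(
    saver.get_stacked_maps_image())
# ... after generation, forward the most recent maps image, if any:
attention_maps_image = attention_maps_images[-1] if attention_maps_images else None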
    def sample_to_image(self,samples)->Image.Image:
        """
        Given samples returned from a sampler, converts
@@ -166,12 +172,12 @@ class Generator():
            blurred_init_mask = pil_init_mask

        multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])

        # Paste original on color-corrected generation (using blurred mask)
        matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
        return matched_result


    def sample_to_lowres_estimated_image(self,samples):
        # originally adapted from code by @erucipe and @keturn here:
@@ -219,11 +225,11 @@ class Generator():
        (txt2img) or from the latent image (img2img, inpaint)
        """
        raise NotImplementedError("get_noise() must be implemented in a descendent class")

    def get_perlin_noise(self,width,height):
        fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
        return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)

    def new_seed(self):
        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
        return self.seed
@@ -325,4 +331,4 @@ class Generator():
        os.makedirs(dirname, exist_ok=True)
        image.save(filepath,'PNG')
@@ -38,7 +38,7 @@ class Embiggen(Generator):
            image = make_image()
            results.append([image, seed])
            if image_callback is not None:
                image_callback(image, seed)
                image_callback(image, seed, prompt_in=prompt)
            seed = self.new_seed()
        return results
@@ -48,6 +48,10 @@ class Img2Img(Generator):
                torch.tensor([t_enc]).to(self.model.device),
                noise=x_T
            )

            if self.free_gpu_mem and self.model.model.device != self.model.device:
                self.model.model.to(self.model.device)

            # decode it
            samples = sampler.decode(
                z_enc,
@@ -61,6 +65,9 @@ class Img2Img(Generator):
                all_timesteps_count = steps
            )

            if self.free_gpu_mem:
                self.model.model.to("cpu")

            return self.sample_to_image(samples)

        return make_image
@@ -87,4 +94,4 @@ class Img2Img(Generator):
        image = torch.from_numpy(image)
        if normalize:
            image = 2.0 * image - 1.0
        return image.to(self.model.device)
@@ -14,7 +14,9 @@ class Txt2Img(Generator):

    @torch.no_grad()
    def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
                       conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,**kwargs):
                       conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,
                       attention_maps_callback=None,
                       **kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image
        Return value depends on the seed at the time you call it
@@ -33,7 +35,7 @@ class Txt2Img(Generator):

            if self.free_gpu_mem and self.model.model.device != self.model.device:
                self.model.model.to(self.model.device)

            sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)

            samples, _ = sampler.sample(
@@ -49,6 +51,7 @@ class Txt2Img(Generator):
                eta = ddim_eta,
                img_callback = step_callback,
                threshold = threshold,
                attention_maps_callback = attention_maps_callback,
            )

            if self.free_gpu_mem:
@@ -5,7 +5,9 @@ otherwise have to be passed through long and complex call chains.
It defines a Namespace object named "Globals" that contains
the attributes:

  - root           - the root directory under which "models" and "outputs" can be found
  - initfile       - path to the initialization file
  - try_patchmatch - option to globally disable loading of 'patchmatch' module
'''

import os
@@ -14,10 +16,10 @@ from argparse import Namespace
Globals = Namespace()

# This is usually overwritten by the command line and/or environment variables
Globals.root = '.'
Globals.root = os.environ.get('INVOKEAI_ROOT') or os.path.expanduser('~/invokeai')

# Where to look for the initialization file
Globals.initfile = os.path.expanduser('~/.invokeai')
Globals.initfile = 'invokeai.init'

# Awkward workaround to disable attempted loading of pypatchmatch
# which is causing CI tests to error out.
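Taken together, the new defaults resolve the runtime root and init file like this (a sketch restating the two lines above plus the Args pre-parse from earlier in this set of changes):

import os

# 1. the INVOKEAI_ROOT environment variable wins; otherwise fall back to ~/invokeai
root = os.environ.get('INVOKEAI_ROOT') or os.path.expanduser('~/invokeai')
# 2. the init file now lives inside the root rather than at ~/.invokeai
initfile = os.path.join(root, 'invokeai.init')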
@@ -227,7 +227,9 @@ class ModelCache(object):
        model_hash = self._cached_sha256(weights,weight_bytes)
        sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
        del weight_bytes
        sd = sd['state_dict']
        # merged models from auto11 merge board are flat for some reason
        if 'state_dict' in sd:
            sd = sd['state_dict']
        model = instantiate_from_config(omega_config.model)
        model.load_state_dict(sd, strict=False)
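The guard above in standalone form: some merged checkpoints are saved flat (the tensors sit at the top level) while others wrap them under a 'state_dict' key, so only unwrap when the key is present. A minimal sketch with a hypothetical checkpoint path:

import torch

sd = torch.load('model.ckpt', map_location='cpu')  # hypothetical path
if 'state_dict' in sd:    # wrapped checkpoint; merged ones may already be flat
    sd = sd['state_dict']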
@@ -3,7 +3,7 @@ from typing import Union, Optional
import re
import pyparsing as pp
'''
This module parses prompt strings and produces tree-like structures that can be used to generate and control the conditioning tensors.
weighted subprompts.

Useful class exports:
@@ -69,6 +69,12 @@ class FlattenedPrompt():
        return len(self.children) == 0 or \
               (len(self.children) == 1 and len(self.children[0].text) == 0)

    @property
    def wants_cross_attention_control(self):
        return any(
            [issubclass(type(x), CrossAttentionControlledFragment) for x in self.children]
        )

    def __repr__(self):
        return f"FlattenedPrompt:{self.children}"
    def __eq__(self, other):
@@ -240,6 +246,12 @@ class Blend():
        self.weights = weights
        self.normalize_weights = normalize_weights

    @property
    def wants_cross_attention_control(self):
        # blends cannot be cross-attention controlled
        return False


    def __repr__(self):
        return f"Blend:{self.prompts} | weights {' ' if self.normalize_weights else '(non-normalized) '}{self.weights}"
    def __eq__(self, other):
@@ -277,8 +289,8 @@ class PromptParser():

        return self.flatten(root[0])

    def parse_legacy_blend(self, text: str) -> Optional[Blend]:
        weighted_subprompts = split_weighted_subprompts(text, skip_normalize=False)
    def parse_legacy_blend(self, text: str, skip_normalize: bool) -> Optional[Blend]:
        weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
        if len(weighted_subprompts) <= 1:
            return None
        strings = [x[0] for x in weighted_subprompts]
@@ -287,7 +299,7 @@ class PromptParser():
        parsed_conjunctions = [self.parse_conjunction(x) for x in strings]
        flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]

        return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=True)
        return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
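For context, the legacy blend syntax assigns a numeric weight to each subprompt with a trailing :weight; a hedged usage sketch (the exact surface syntax is handled by split_weighted_subprompts, and the weights shown are illustrative):

pp = PromptParser()
# "mountain" weighted 2, "lake" weighted 1
blend = pp.parse_legacy_blend("mountain:2 lake:1", skip_normalize=False)
# with normalization the effective weights become 2/3 and 1/3;
# passing skip_normalize=True keeps the raw 2 and 1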
    def flatten(self, root: Conjunction, verbose = False) -> Conjunction:
@@ -641,27 +653,3 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
        return [(x[0], equal_weight) for x in parsed_prompts]
    return [(x[0], x[1] / weight_sum) for x in parsed_prompts]


# shows how the prompt is tokenized
# usually tokens have '</w>' to indicate end-of-word,
# but for readability it has been replaced with ' '
def log_tokenization(text, model, display_label=None):
    tokens = model.cond_stage_model.tokenizer.tokenize(text)
    tokenized = ""
    discarded = ""
    usedTokens = 0
    totalTokens = len(tokens)
    for i in range(0, totalTokens):
        token = tokens[i].replace('</w>', ' ')
        # alternate color
        s = (usedTokens % 6) + 1
        if i < model.cond_stage_model.max_length:
            tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
            usedTokens += 1
        else:  # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"
    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
        )
@@ -53,7 +53,6 @@ COMMANDS = (
    '--codeformer_fidelity','-cf',
    '--upscale','-U',
    '-save_orig','--save_original',
    '--skip_normalize','-x',
    '--log_tokenization','-t',
    '--hires_fix',
    '--inpaint_replace','-r',
@@ -117,19 +116,19 @@ class Completer(object):
            # extensions defined, so go directly into path completion mode
            if self.extensions is not None:
                self.matches = self._path_completions(text, state, self.extensions)

            # looking for an image file
            elif re.search(path_regexp,buffer):
                do_shortcut = re.search('^'+'|'.join(IMG_FILE_COMMANDS),buffer)
                self.matches = self._path_completions(text, state, IMG_EXTENSIONS,shortcut_ok=do_shortcut)

            # looking for a seed
            elif re.search('(-S\s*|--seed[=\s])\d*$',buffer):
                self.matches= self._seed_completions(text,state)

            elif re.search('<[\w-]*$',buffer):
                self.matches= self._concept_completions(text,state)

            # looking for a model
            elif re.match('^'+'|'.join(MODEL_COMMANDS),buffer):
                self.matches= self._model_completions(text, state)
@@ -227,7 +226,7 @@ class Completer(object):
        if h_len < 1:
            print('<empty history>')
            return

        for i in range(0,h_len):
            line = self.get_history_item(i+1)
            if match and match not in line:
@@ -367,7 +366,7 @@ class DummyCompleter(Completer):
    def __init__(self,options):
        super().__init__(options)
        self.history = list()

    def add_history(self,line):
        self.history.append(line)
@@ -1,12 +1,14 @@
import enum
from typing import Optional
import math
from typing import Optional, Callable

import psutil
import torch
from torch import nn

# adapted from bloc97's CrossAttentionControl colab
# https://github.com/bloc97/CrossAttentionControl


class Arguments:
    def __init__(self, edited_conditioning: torch.Tensor, edit_opcodes: list[tuple], edit_options: dict):
        """
@@ -63,9 +65,13 @@ class Context:
        self.clear_requests(cleanup=True)

    def register_cross_attention_modules(self, model):
        for name,module in get_attention_modules(model, CrossAttentionType.SELF):
        for name,module in get_cross_attention_modules(model, CrossAttentionType.SELF):
            if name in self.self_cross_attention_module_identifiers:
                assert False, f"name {name} cannot appear more than once"
            self.self_cross_attention_module_identifiers.append(name)
        for name,module in get_attention_modules(model, CrossAttentionType.TOKENS):
        for name,module in get_cross_attention_modules(model, CrossAttentionType.TOKENS):
            if name in self.tokens_cross_attention_module_identifiers:
                assert False, f"name {name} cannot appear more than once"
            self.tokens_cross_attention_module_identifiers.append(name)

    def request_save_attention_maps(self, cross_attention_type: CrossAttentionType):
@@ -166,6 +172,135 @@ class Context:
                map_dict[offset] = slice.to('cpu')


class InvokeAICrossAttentionMixin:
    """
    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
    through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
    and dynamic slicing strategy selection.
    """
    def __init__(self):
        self.mem_total_gb = psutil.virtual_memory().total // (1 << 30)
        self.attention_slice_wrangler = None
        self.slicing_strategy_getter = None
        self.attention_slice_calculated_callback = None

    def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]):
        '''
        Set custom attention calculator to be called when attention is calculated
        :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
            which returns either the suggested_attention_slice or an adjusted equivalent.
            `module` is the current CrossAttention module for which the callback is being invoked.
            `suggested_attention_slice` is the default-calculated attention slice
            `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
            If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.

        Pass None to use the default attention calculation.
        :return:
        '''
        self.attention_slice_wrangler = wrangler

    def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]):
        self.slicing_strategy_getter = getter

    def set_attention_slice_calculated_callback(self, callback: Optional[Callable[[torch.Tensor], None]]):
        self.attention_slice_calculated_callback = callback

    def einsum_lowest_level(self, query, key, value, dim, offset, slice_size):
        # calculate attention scores
        #attention_scores = torch.einsum('b i d, b j d -> b i j', q, k)
        attention_scores = torch.baddbmm(
            torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
            query,
            key.transpose(-1, -2),
            beta=0,
            alpha=self.scale,
        )

        # calculate attention slice by taking the best scores for each latent pixel
        default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype)
        attention_slice_wrangler = self.attention_slice_wrangler
        if attention_slice_wrangler is not None:
            attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size)
        else:
            attention_slice = default_attention_slice

        if self.attention_slice_calculated_callback is not None:
            self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size)

        hidden_states = torch.bmm(attention_slice, value)
        return hidden_states
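Two details of einsum_lowest_level are worth spelling out: with beta=0 the first argument to torch.baddbmm is never read (so the uninitialized torch.empty tensor is safe), and alpha=self.scale folds the usual 1/sqrt(d) attention scaling into the same fused kernel, making the call equivalent to the commented-out einsum. A self-contained equivalence check under assumed shapes:

import torch

q = torch.randn(2, 8, 4)
k = torch.randn(2, 8, 4)
scale = 4 ** -0.5   # 1/sqrt(head_dim)

a = torch.baddbmm(torch.empty(2, 8, 8), q, k.transpose(-1, -2), beta=0, alpha=scale)
b = torch.einsum('b i d, b j d -> b i j', q, k) * scale
assert torch.allclose(a, b, atol=1e-5)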
    def einsum_op_slice_dim0(self, q, k, v, slice_size):
        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
        for i in range(0, q.shape[0], slice_size):
            end = i + slice_size
            r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size)
        return r

    def einsum_op_slice_dim1(self, q, k, v, slice_size):
        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
        for i in range(0, q.shape[1], slice_size):
            end = i + slice_size
            r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size)
        return r

    def einsum_op_mps_v1(self, q, k, v):
        if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
            return self.einsum_lowest_level(q, k, v, None, None, None)
        else:
            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
            return self.einsum_op_slice_dim1(q, k, v, slice_size)

    def einsum_op_mps_v2(self, q, k, v):
        if self.mem_total_gb > 8 and q.shape[1] <= 4096:
            return self.einsum_lowest_level(q, k, v, None, None, None)
        else:
            return self.einsum_op_slice_dim0(q, k, v, 1)

    def einsum_op_tensor_mem(self, q, k, v, max_tensor_mb):
        size_mb = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20)
        if size_mb <= max_tensor_mb:
            return self.einsum_lowest_level(q, k, v, None, None, None)
        div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length()
        if div <= q.shape[0]:
            return self.einsum_op_slice_dim0(q, k, v, q.shape[0] // div)
        return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1))
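To make the sizing arithmetic in einsum_op_tensor_mem concrete, a worked example as comments (the tensor shapes are assumed for illustration):

# q: [16, 4096, 40], k: [16, 4096, 40], fp16 (element_size = 2)
# size_mb = 16 * 4096 * 4096 * 2 // 2**20 = 512
# with max_tensor_mb = 32:
#   div = 1 << int((512 - 1) / 32).bit_length() = 1 << (15).bit_length() = 16
#   div (16) <= q.shape[0] (16), so slice dim 0 with slice_size 16 // 16 = 1,
#   i.e. the attention map is computed one batch-head at a time.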
    def einsum_op_cuda(self, q, k, v):
        # check if we already have a slicing strategy (this should only happen during cross-attention controlled generation)
        slicing_strategy_getter = self.slicing_strategy_getter
        if slicing_strategy_getter is not None:
            (dim, slice_size) = slicing_strategy_getter(self)
            if dim is not None:
                # print("using saved slicing strategy with dim", dim, "slice size", slice_size)
                if dim == 0:
                    return self.einsum_op_slice_dim0(q, k, v, slice_size)
                elif dim == 1:
                    return self.einsum_op_slice_dim1(q, k, v, slice_size)

        # fallback for when there is no saved strategy, or saved strategy does not slice
        mem_free_total = get_mem_free_total(q.device)
        # Divide factor of safety as there's copying and fragmentation
        return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20))

    def get_invokeai_attention_mem_efficient(self, q, k, v):
        if q.device.type == 'cuda':
            #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device))
            return self.einsum_op_cuda(q, k, v)

        if q.device.type == 'mps' or q.device.type == 'cpu':
            if self.mem_total_gb >= 32:
                return self.einsum_op_mps_v1(q, k, v)
            return self.einsum_op_mps_v2(q, k, v)

        # Smaller slices are faster due to L2/L3/SLC caches.
        # Tested on i7 with 8MB L3 cache.
        return self.einsum_op_tensor_mem(q, k, v, 32)


def remove_cross_attention_control(model):
    remove_attention_function(model)

@@ -187,7 +322,7 @@ def setup_cross_attention_control(model, context: Context):
    # mask=1 means use base prompt attention, mask=0 means use edited prompt attention
    mask = torch.zeros(max_length)
    indices_target = torch.arange(max_length, dtype=torch.long)
    indices = torch.zeros(max_length, dtype=torch.long)
    indices = torch.arange(max_length, dtype=torch.long)
    for name, a0, a1, b0, b1 in context.arguments.edit_opcodes:
        if b0 < max_length:
            if name == "equal":  # or (name == "replace" and a1 - a0 == b1 - b0):
@@ -201,10 +336,23 @@ def setup_cross_attention_control(model, context: Context):
    inject_attention_function(model, context)


def get_attention_modules(model, which: CrossAttentionType):
def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
    cross_attention_class: type = InvokeAICrossAttentionMixin
    # cross_attention_class: type = InvokeAIDiffusersCrossAttention
    which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
    return [(name,module) for name, module in model.named_modules() if
            type(module).__name__ == "CrossAttention" and which_attn in name]
    attention_module_tuples = [(name,module) for name, module in model.named_modules() if
                               isinstance(module, cross_attention_class) and which_attn in name]
    cross_attention_modules_in_model_count = len(attention_module_tuples)
    expected_count = 16
    if cross_attention_modules_in_model_count != expected_count:
        # non-fatal error but .swap() won't work.
        print(f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " +
              f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " +
              f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " +
              f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " +
              f"what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not " +
              f"work properly until it is fixed.")
    return attention_module_tuples
def inject_attention_function(unet, context: Context):
@@ -244,19 +392,52 @@ def inject_attention_function(unet, context: Context):

         return attention_slice

-    for name, module in unet.named_modules():
-        module_name = type(module).__name__
-        if module_name == "CrossAttention":
-            module.identifier = name
+    cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
+    for identifier, module in cross_attention_modules:
+        module.identifier = identifier
+        try:
             module.set_attention_slice_wrangler(attention_slice_wrangler)
-            module.set_slicing_strategy_getter(lambda module, module_identifier=name: \
-                context.get_slicing_strategy(module_identifier))
+            module.set_slicing_strategy_getter(
+                lambda module: context.get_slicing_strategy(identifier)
+            )
+        except AttributeError as e:
+            if is_attribute_error_about(e, 'set_attention_slice_wrangler'):
+                print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")  # TODO
+            else:
+                raise
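One subtlety worth flagging (an observation about Python semantics, not something stated in the diff): the replacement lambda closes over the loop variable identifier directly, and Python closures bind late, so every getter created in this loop would see the identifier from the last iteration. The old code sidestepped this with the module_identifier=name default-argument idiom, as does the key=key lambda in shared_invokeai_diffusion.py further down. A standalone demonstration:

    fns = [lambda: name for name in ('down', 'mid', 'up')]
    print([f() for f in fns])  # ['up', 'up', 'up'] -- late binding sees the final value
    fns = [lambda name=name: name for name in ('down', 'mid', 'up')]
    print([f() for f in fns])  # ['down', 'mid', 'up'] -- default arg binds at definition time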
def remove_attention_function(unet):
-    # clear wrangler callback
-    for name, module in unet.named_modules():
-        module_name = type(module).__name__
-        if module_name == "CrossAttention":
+    cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF)
+    for identifier, module in cross_attention_modules:
+        try:
+            # clear wrangler callback
             module.set_attention_slice_wrangler(None)
             module.set_slicing_strategy_getter(None)
+        except AttributeError as e:
+            if is_attribute_error_about(e, 'set_attention_slice_wrangler'):
+                print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")
+            else:
+                raise
def is_attribute_error_about(error: AttributeError, attribute: str):
    if hasattr(error, 'name'):  # Python 3.10
        return error.name == attribute
    else:  # Python 3.9
        return attribute in str(error)
def get_mem_free_total(device):
    # only on cuda
    if not torch.cuda.is_available():
        return None
    stats = torch.cuda.memory_stats(device)
    mem_active = stats['active_bytes.all.current']
    mem_reserved = stats['reserved_bytes.all.current']
    mem_free_cuda, _ = torch.cuda.mem_get_info(device)
    mem_free_torch = mem_reserved - mem_active
    mem_free_total = mem_free_cuda + mem_free_torch
    return mem_free_total
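get_mem_free_total reports what a fresh allocation could plausibly claim: memory that is free at the driver level plus the slack inside PyTorch's caching allocator (reserved but not active). A hypothetical reading of the numbers (illustrative values only):

    mem_free_cuda  = 2 * (1 << 30)  # 2 GB free at the driver level
    mem_reserved   = 6 * (1 << 30)  # 6 GB held by torch's caching allocator
    mem_active     = 5 * (1 << 30)  # 5 GB of that in live tensors
    mem_free_total = mem_free_cuda + (mem_reserved - mem_active)  # 3 GB usable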
95  ldm/models/diffusion/cross_attention_map_saving.py  (new file)
@@ -0,0 +1,95 @@
import math

import PIL
import torch
from torchvision.transforms.functional import resize as tv_resize, InterpolationMode

from ldm.models.diffusion.cross_attention_control import get_cross_attention_modules, CrossAttentionType


class AttentionMapSaver():

    def __init__(self, token_ids: range, latents_shape: torch.Size):
        self.token_ids = token_ids
        self.latents_shape = latents_shape
        #self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]])
        self.collated_maps = {}

    def clear_maps(self):
        self.collated_maps = {}
    def add_attention_maps(self, maps: torch.Tensor, key: str):
        """
        Accumulate the given attention maps, summing with any existing maps stored under the passed-in key.
        :param maps: Attention maps to store. Expected shape [A, (H*W), N], where A is the attention head count, H and W are the map size (fixed per key) and N is the number of tokens (typically 77).
        :param key: Storage key. If a map already exists for this key it will be summed with the incoming data, in which case the map sizes (H and W) should match.
        :return: None
        """
        key_and_size = f'{key}_{maps.shape[1]}'

        # extract desired tokens
        maps = maps[:, :, self.token_ids]

        # merge attention heads to a single map per token
        maps = torch.sum(maps, 0)

        # store
        if key_and_size not in self.collated_maps:
            self.collated_maps[key_and_size] = torch.zeros_like(maps, device='cpu')
        self.collated_maps[key_and_size] += maps.cpu()
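A minimal usage sketch of the accumulator above (shapes and keys are illustrative): 8 heads, a 16x16 map, 77 tokens, of which tokens 1-9 are kept.

    saver = AttentionMapSaver(token_ids=range(1, 10), latents_shape=torch.Size([64, 64]))
    maps = torch.rand(8, 16 * 16, 77)          # [heads, H*W, tokens]
    saver.add_attention_maps(maps, key='mid')  # stored under 'mid_256', summed over heads
    saver.add_attention_maps(maps, key='mid')  # same key and size: summed into the existing map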

    def write_maps_to_disk(self, path: str):
        pil_image = self.get_stacked_maps_image()
        pil_image.save(path, 'PNG')
    def get_stacked_maps_image(self) -> PIL.Image:
        """
        Scale all collected attention maps to the same size, blend them together and return as an image.
        :return: An image containing a vertical stack of blended attention maps, one for each requested token.
        """
        num_tokens = len(self.token_ids)
        if num_tokens == 0:
            return None

        latents_height = self.latents_shape[0]
        latents_width = self.latents_shape[1]

        merged = None

        for key, maps in self.collated_maps.items():

            # maps has shape [(H*W), N] for N tokens
            # but we want [N, H, W]
            this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height))
            this_maps_height = int(float(latents_height) * this_scale_factor)
            this_maps_width = int(float(latents_width) * this_scale_factor)
            # and we need to do some dimension juggling
            maps = torch.reshape(torch.swapdims(maps, 0, 1), [num_tokens, this_maps_height, this_maps_width])

            # scale to output size if necessary
            if this_scale_factor != 1:
                maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC)

            # normalize
            maps_min = torch.min(maps)
            maps_range = torch.max(maps) - maps_min
            #print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}")
            maps_normalized = (maps - maps_min) / maps_range
            # expand slightly to (-0.05, 1.05) and clamp back to (0, 1)
            maps_normalized_expanded = maps_normalized * 1.1 - 0.05
            maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1)

            # merge together, producing a vertical stack
            maps_stacked = torch.reshape(maps_normalized_expanded_clamped, [num_tokens * latents_height, latents_width])

            if merged is None:
                merged = maps_stacked
            else:
                # screen blend
                merged = 1 - (1 - maps_stacked) * (1 - merged)

        if merged is None:
            return None

        merged_bytes = merged.mul(0xff).byte()
        return PIL.Image.fromarray(merged_bytes.numpy(), mode='L')
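The screen blend used above is the standard compositing operator screen(a, b) = 1 - (1 - a)(1 - b): it is commutative, blending with black is the identity, and the result is never darker than either input, so regions that any map highlights stay visible in the stack. Two quick checks:

    a, b = 0.5, 0.5
    print(1 - (1 - a) * (1 - b))  # 0.75 -- brighter than either input
    a, b = 0.0, 0.8
    print(1 - (1 - a) * (1 - b))  # 0.8  -- blending with black is the identity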
@@ -4,6 +4,7 @@ import k_diffusion as K
 import torch
 from torch import nn

+from .cross_attention_map_saving import AttentionMapSaver
 from .sampler import Sampler
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent

@@ -36,6 +37,7 @@ class CFGDenoiser(nn.Module):
         self.invokeai_diffuser = InvokeAIDiffuserComponent(model,
                                                            model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))

     def prepare_to_sample(self, t_enc, **kwargs):

         extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
@@ -106,12 +108,12 @@ class KSampler(Sampler):
         else:
             print(f'>> Ksampler using karras noise schedule (steps < {self.karras_max})')
             self.sigmas = self.karras_sigmas

     # ALERT: We are completely overriding the sample() method in the base class, which
     # means that inpainting will not work. To get this to work we need to be able to
     # modify the inner loop of k_heun, k_lms, etc, as is done in an ugly way
     # in the lstein/k-diffusion branch.

     @torch.no_grad()
     def decode(
         self,
@@ -145,7 +147,7 @@ class KSampler(Sampler):
     @torch.no_grad()
     def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
         return x0

     # Most of these arguments are ignored and are only present for compatibility with
     # other samplers
     @torch.no_grad()
@@ -158,6 +160,7 @@ class KSampler(Sampler):
         callback=None,
         normals_sequence=None,
         img_callback=None,
+        attention_maps_callback=None,
         quantize_x0=False,
         eta=0.0,
         mask=None,
@@ -171,7 +174,7 @@ class KSampler(Sampler):
         log_every_t=100,
         unconditional_guidance_scale=1.0,
         unconditional_conditioning=None,
-        extra_conditioning_info=None,
+        extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo = None,
         threshold=0,
         perlin=0,
         # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
@@ -204,6 +207,12 @@ class KSampler(Sampler):

         model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S, S-10))
         model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info)

+        attention_map_token_ids = range(1, extra_conditioning_info.tokens_count_including_eos_bos - 1)
+        attention_maps_saver = None if attention_maps_callback is None else AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=x.shape[-2:])
+        if attention_maps_callback is not None:
+            model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_maps_saver)

         extra_args = {
             'cond': conditioning,
             'uncond': unconditional_conditioning,
@@ -217,6 +226,8 @@ class KSampler(Sampler):
             ),
             None,
         )
+        if attention_maps_callback is not None:
+            attention_maps_callback(attention_maps_saver)
         return sampling_result

     # this code will support inpainting if and when ksampler API modified or
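A sketch of what a caller might pass for attention_maps_callback (hypothetical; the function name and output path are illustrative, not from the commit):

    def attention_maps_callback(saver):
        # receives the populated AttentionMapSaver after sampling finishes
        if saver is not None:
            saver.write_maps_to_disk('outputs/attention_maps.png')

    # sampler.sample(..., attention_maps_callback=attention_maps_callback)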
@@ -248,7 +259,7 @@ class KSampler(Sampler):
         # terrible, confusing names here
         steps = self.ddim_num_steps
         t_enc = self.t_enc

         # sigmas is a full steps in length, but t_enc might
         # be less. We start in the middle of the sigma array
         # and work our way to the end after t_enc steps.
@@ -280,7 +291,7 @@ class KSampler(Sampler):
             return x_T + x
         else:
             return x

     def prepare_to_sample(self, t_enc, **kwargs):
         self.t_enc = t_enc
         self.model_wrap = None
@@ -5,8 +5,8 @@ from typing import Callable, Optional, Union
 import torch

 from ldm.models.diffusion.cross_attention_control import Arguments, \
-    remove_cross_attention_control, setup_cross_attention_control, Context
-from ldm.modules.attention import get_mem_free_total
+    remove_cross_attention_control, setup_cross_attention_control, Context, get_cross_attention_modules, CrossAttentionType
+from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver


 class InvokeAIDiffuserComponent:
@@ -21,7 +21,8 @@ class InvokeAIDiffuserComponent:

     class ExtraConditioningInfo:
-        def __init__(self, cross_attention_control_args: Optional[Arguments]):
+        def __init__(self, tokens_count_including_eos_bos: int, cross_attention_control_args: Optional[Arguments]):
+            self.tokens_count_including_eos_bos = tokens_count_including_eos_bos
             self.cross_attention_control_args = cross_attention_control_args

         @property
@@ -52,7 +53,25 @@ class InvokeAIDiffuserComponent:
         self.cross_attention_control_context = None
         remove_cross_attention_control(self.model)

+    def setup_attention_map_saving(self, saver: AttentionMapSaver):
+        def callback(slice, dim, offset, slice_size, key):
+            if dim is not None:
+                # sliced tokens attention map saving is not implemented
+                return
+            saver.add_attention_maps(slice, key)
+
+        tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS)
+        for identifier, module in tokens_cross_attention_modules:
+            key = ('down' if identifier.startswith('down') else
+                   'up' if identifier.startswith('up') else
+                   'mid')
+            module.set_attention_slice_calculated_callback(
+                lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key))
+
+    def remove_attention_map_saving(self):
+        tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS)
+        for _, module in tokens_cross_attention_modules:
+            module.set_attention_slice_calculated_callback(None)

     def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor,
                           unconditioning: Union[torch.Tensor, dict],
@@ -7,10 +7,9 @@ import torch.nn.functional as F
 from torch import nn, einsum
 from einops import rearrange, repeat

+from ldm.models.diffusion.cross_attention_control import InvokeAICrossAttentionMixin
 from ldm.modules.diffusionmodules.util import checkpoint

 import psutil

 def exists(val):
     return val is not None
@@ -164,9 +163,10 @@ def get_mem_free_total(device):
     return mem_free_total


-class CrossAttention(nn.Module):
+class CrossAttention(nn.Module, InvokeAICrossAttentionMixin):
     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
         super().__init__()
+        InvokeAICrossAttentionMixin.__init__(self)
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
@@ -182,118 +182,6 @@ class CrossAttention(nn.Module):
             nn.Dropout(dropout)
         )

-        self.mem_total_gb = psutil.virtual_memory().total // (1 << 30)
-
-        self.cached_mem_free_total = None
-        self.attention_slice_wrangler = None
-        self.slicing_strategy_getter = None
-
-    def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]):
-        '''
-        Set custom attention calculator to be called when attention is calculated
-        :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
-            which returns either the suggested_attention_slice or an adjusted equivalent.
-            `module` is the current CrossAttention module for which the callback is being invoked.
-            `suggested_attention_slice` is the default-calculated attention slice
-            `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
-            If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
-
-        Pass None to use the default attention calculation.
-        :return:
-        '''
-        self.attention_slice_wrangler = wrangler
-
-    def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int, int]]]):
-        self.slicing_strategy_getter = getter
-
-    def cache_free_memory_count(self, device):
-        self.cached_mem_free_total = get_mem_free_total(device)
-        print("free cuda memory: ", self.cached_mem_free_total)
-
-    def clear_cached_free_memory_count(self):
-        self.cached_mem_free_total = None
-
-    def einsum_lowest_level(self, q, k, v, dim, offset, slice_size):
-        # calculate attention scores
-        attention_scores = einsum('b i d, b j d -> b i j', q, k)
-        # calculate attention slice by taking the best scores for each latent pixel
-        default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype)
-        attention_slice_wrangler = self.attention_slice_wrangler
-        if attention_slice_wrangler is not None:
-            attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size)
-        else:
-            attention_slice = default_attention_slice
-
-        return einsum('b i j, b j d -> b i d', attention_slice, v)
-
-    def einsum_op_slice_dim0(self, q, k, v, slice_size):
-        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-        for i in range(0, q.shape[0], slice_size):
-            end = i + slice_size
-            r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size)
-        return r
-
-    def einsum_op_slice_dim1(self, q, k, v, slice_size):
-        r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-        for i in range(0, q.shape[1], slice_size):
-            end = i + slice_size
-            r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size)
-        return r
-
-    def einsum_op_mps_v1(self, q, k, v):
-        if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        else:
-            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
-            return self.einsum_op_slice_dim1(q, k, v, slice_size)
-
-    def einsum_op_mps_v2(self, q, k, v):
-        if self.mem_total_gb > 8 and q.shape[1] <= 4096:
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        else:
-            return self.einsum_op_slice_dim0(q, k, v, 1)
-
-    def einsum_op_tensor_mem(self, q, k, v, max_tensor_mb):
-        size_mb = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20)
-        if size_mb <= max_tensor_mb:
-            return self.einsum_lowest_level(q, k, v, None, None, None)
-        div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length()
-        if div <= q.shape[0]:
-            return self.einsum_op_slice_dim0(q, k, v, q.shape[0] // div)
-        return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1))
-
-    def einsum_op_cuda(self, q, k, v):
-        # check if we already have a slicing strategy (this should only happen during cross-attention controlled generation)
-        slicing_strategy_getter = self.slicing_strategy_getter
-        if slicing_strategy_getter is not None:
-            (dim, slice_size) = slicing_strategy_getter(self)
-            if dim is not None:
-                # print("using saved slicing strategy with dim", dim, "slice size", slice_size)
-                if dim == 0:
-                    return self.einsum_op_slice_dim0(q, k, v, slice_size)
-                elif dim == 1:
-                    return self.einsum_op_slice_dim1(q, k, v, slice_size)
-
-        # fallback for when there is no saved strategy, or the saved strategy does not slice
-        mem_free_total = self.cached_mem_free_total or get_mem_free_total(q.device)
-        # divide by a factor of safety, since there is copying and fragmentation
-        return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20))
-
-    def get_attention_mem_efficient(self, q, k, v):
-        if q.device.type == 'cuda':
-            #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device))
-            return self.einsum_op_cuda(q, k, v)
-
-        if q.device.type == 'mps':
-            if self.mem_total_gb >= 32:
-                return self.einsum_op_mps_v1(q, k, v)
-            return self.einsum_op_mps_v2(q, k, v)
-
-        # Smaller slices are faster due to L2/L3/SLC caches.
-        # Tested on i7 with 8MB L3 cache.
-        return self.einsum_op_tensor_mem(q, k, v, 32)

     def forward(self, x, context=None, mask=None):
         h = self.heads

@@ -305,7 +193,11 @@ class CrossAttention(nn.Module):

         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

-        r = self.get_attention_mem_efficient(q, k, v)
+        # don't apply scale twice
+        cached_scale = self.scale
+        self.scale = 1
+        r = self.get_invokeai_attention_mem_efficient(q, k, v)
+        self.scale = cached_scale

         hidden_states = rearrange(r, '(b h) n d -> b n (h d)', h=h)
         return self.to_out(hidden_states)
@@ -18,6 +18,7 @@ from tqdm import tqdm
 from omegaconf import OmegaConf
 from huggingface_hub import HfFolder, hf_hub_url
 from pathlib import Path
+from typing import Union
 from getpass_asterisk import getpass_asterisk
 from transformers import CLIPTokenizer, CLIPTextModel
 from ldm.invoke.globals import Globals
@@ -39,7 +40,7 @@ Dataset_path = './configs/INITIAL_MODELS.yaml'
 Default_config_file = './configs/models.yaml'
 SD_Configs = './configs/stable-diffusion'

-assert os.path.exists(Dataset_path), "The configs directory cannot be found. Please run this script from within the InvokeAI distribution directory, or from within the invokeai runtime directory."
+assert os.path.exists(Dataset_path), "The configs directory cannot be found. Please run this script from within the invokeai runtime directory."

 Datasets = OmegaConf.load(Dataset_path)
 completer = generic_completer(['yes','no'])
@@ -62,10 +63,10 @@ this program and resume later.\n'''
 )

 #--------------------------------------------
-def postscript():
-    print(
-        '''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:
-Web version:
+def postscript(errors: None):
+    if not any(errors):
+        message = '''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:
+Web version:
     python scripts/invoke.py --web
 Command-line version:
     python scripts/invoke.py
@@ -77,7 +78,14 @@ automated installation script, execute "invoke.sh" (Linux/Mac) or

 Have fun!
 '''
-    )
+    else:
+        message = f"\n** There were errors during installation. It is possible some of the models were not fully downloaded.\n"
+        for err in errors:
+            message += f"\t - {err}\n"
+        message += "Please check the logs above and correct any issues."
+
+    print(message)
#---------------------------------------------
def yes_or_no(prompt: str, default_yes=True):
@@ -129,7 +137,7 @@ def select_datasets(action:str):

     if action == 'customized':
         print('''
 Choose the weight file(s) you wish to download. Before downloading you
 will be given the option to view and change your selections.
 '''
         )
@@ -144,7 +152,7 @@ will be given the option to view and change your selections.
         if Datasets[ds]['recommended']:
             datasets[ds] = counter
             counter += 1

     print('The following weight files will be downloaded:')
     for ds in datasets:
         dflt = '*' if dflt is None else ''
@@ -179,7 +187,7 @@ def all_datasets() -> dict:
 #------------------------------- Authenticate against Hugging Face
 def authenticate():
     print('''
To download the Stable Diffusion weight files from the official Hugging Face
repository, you need to read and accept the CreativeML Responsible AI license.

This involves a few easy steps.
@@ -212,25 +220,25 @@ This involves a few easy steps.
     access_token = HfFolder.get_token()
     if access_token is not None:
         print('found')
-    if access_token is None:
+    else:
         print('not found')
         print('''
4. Thank you! The last step is to enter your HuggingFace access token so that
   this script is authorized to initiate the download. Go to the access tokens
   page of your Hugging Face account and create a token by clicking the
   "New token" button:

   https://huggingface.co/settings/tokens

   (You can enter anything you like in the token creation field marked "Name".
   "Role" should be "read").

   Now copy the token to your clipboard and paste it at the prompt. Windows
-   users can paste with right-click.
+   users can paste with right-click or Ctrl-Shift-V.
Token: '''
         )
         access_token = getpass_asterisk.getpass_asterisk()
         HfFolder.save_token(access_token)
     return access_token
#---------------------------------------------
@@ -246,7 +254,7 @@ def migrate_models_ckpt():
     if rename:
         print(f'model.ckpt => {new_name}')
         os.replace(os.path.join(model_path, 'model.ckpt'), os.path.join(model_path, new_name))

#---------------------------------------------
def download_weight_datasets(models: dict, access_token: str):
    migrate_models_ckpt()
@@ -273,9 +281,9 @@ def download_weight_datasets(models:dict, access_token:str):

     HfFolder.save_token(access_token)
     keys = ', '.join(successful.keys())
     print(f'Successfully installed {keys}')
     return successful
#---------------------------------------------
def hf_download_with_resume(repo_id: str, model_dir: str, model_name: str, access_token: str = None) -> bool:
    model_dest = os.path.join(model_dir, model_name)
@@ -286,7 +294,7 @@ def hf_download_with_resume(repo_id:str, model_dir:str, model_name:str, access_t
     header = {"Authorization": f'Bearer {access_token}'} if access_token else {}
     open_mode = 'wb'
     exist_size = 0

     if os.path.exists(model_dest):
         exist_size = os.path.getsize(model_dest)
         header['Range'] = f'bytes={exist_size}-'
@@ -294,7 +302,7 @@ def hf_download_with_resume(repo_id:str, model_dir:str, model_name:str, access_t
     resp = requests.get(url, headers=header, stream=True)
     total = int(resp.headers.get('content-length', 0))

     if resp.status_code == 416:  # "range not satisfiable", which means nothing to return
         print(f'* {model_name}: complete file found. Skipping.')
         return True
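The resume logic is plain HTTP range semantics: send Range: bytes=<existing>- for a partial file, append on 206 Partial Content, and treat 416 Range Not Satisfiable as "already complete". A minimal standalone sketch under those assumptions (simplified, not the function from the commit):

    import os
    import requests

    def resume_download(url: str, dest: str):
        exist_size = os.path.getsize(dest) if os.path.exists(dest) else 0
        headers = {'Range': f'bytes={exist_size}-'} if exist_size else {}
        resp = requests.get(url, headers=headers, stream=True)
        if resp.status_code == 416:                        # nothing left to fetch
            return
        mode = 'ab' if resp.status_code == 206 else 'wb'   # append only if the range was honored
        with open(dest, mode) as f:
            for chunk in resp.iter_content(chunk_size=1 << 20):
                f.write(chunk)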
@@ -342,12 +350,12 @@ def download_with_progress_bar(model_url:str, model_dest:str, label:str='the'):
         print(f'Error downloading {label} model')
         print(traceback.format_exc())

#---------------------------------------------
def update_config_file(successfully_downloaded: dict, opt: dict):
    config_file = opt.config_file or Default_config_file
    config_file = os.path.normpath(os.path.join(Globals.root, config_file))

    yaml = new_config_file_contents(successfully_downloaded, config_file)

    try:
@@ -366,8 +374,8 @@ def update_config_file(successfully_downloaded:dict,opt:dict):

     print(f'Successfully created new configuration file {config_file}')

#---------------------------------------------
def new_config_file_contents(successfully_downloaded: dict, config_file: str) -> str:
    if os.path.exists(config_file):
        conf = OmegaConf.load(config_file)
@@ -377,19 +385,19 @@ def new_config_file_contents(successfully_downloaded:dict, config_file:str)->str
     # find the VAE file, if there is one
     vaes = {}
     default_selected = False

     for model in successfully_downloaded:
         a = Datasets[model]['config'].split('/')
         if a[0] != 'VAE':
             continue
         vae_target = a[1] if len(a) > 1 else 'default'
         vaes[vae_target] = Datasets[model]['file']

     for model in successfully_downloaded:
         if Datasets[model]['config'].startswith('VAE'):  # skip VAE entries
             continue
         stanza = conf[model] if model in conf else {}

         stanza['description'] = Datasets[model]['description']
         stanza['weights'] = os.path.join(Model_dir, Weights_dir, Datasets[model]['file'])
         stanza['config'] = os.path.normpath(os.path.join(SD_Configs, Datasets[model]['config']))
@@ -408,7 +416,7 @@ def new_config_file_contents(successfully_downloaded:dict, config_file:str)->str
             default_selected = True
         conf[model] = stanza
     return OmegaConf.to_yaml(conf)
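For reference, a sketch of the kind of models.yaml stanza this function assembles (field values are made up for illustration):

    from omegaconf import OmegaConf

    conf = OmegaConf.create({
        'stable-diffusion-1.5': {
            'description': 'Stable Diffusion version 1.5',
            'weights': 'models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt',
            'config': 'configs/stable-diffusion/v1-inference.yaml',
        }
    })
    print(OmegaConf.to_yaml(conf))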
#---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
@@ -478,7 +486,7 @@ def download_clipseg():
     model_url = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
     model_dest = os.path.join(Globals.root, 'models/clipseg/clipseg_weights')
     weights_zip = 'models/clipseg/weights.zip'

     if not os.path.exists(model_dest):
         os.makedirs(os.path.dirname(model_dest), exist_ok=True)
     if not os.path.exists(f'{model_dest}/rd64-uni-refined.pth'):
@@ -521,17 +529,27 @@ def download_safety_checker():
     print('...success', file=sys.stderr)
#-------------------------------------
-def download_weights(opt:dict):
+def download_weights(opt: dict) -> Union[str, None]:
+    # Authenticate to Huggingface using environment variables.
+    # If successful, authentication will persist for either interactive or non-interactive use.
+    # Default env var expected by HuggingFace is HUGGING_FACE_HUB_TOKEN.
+    if not (access_token := HfFolder.get_token()):
+        # If unable to find an existing token or expected environment, try the non-canonical environment variable (widely used in the community and supported as per the docs)
+        if (access_token := os.getenv("HUGGINGFACE_TOKEN")):
+            # set the environment variable here instead of simply calling huggingface_hub.login(token), to maintain consistent behaviour.
+            # when calling the .login() method, the token is cached in the user's home directory. When the env var is used, the token is NOT cached.
+            os.environ['HUGGING_FACE_HUB_TOKEN'] = access_token

     if opt.yes_to_all:
         models = recommended_datasets()
-        access_token = HfFolder.get_token()
         if len(models) > 0 and access_token is not None:
             successfully_downloaded = download_weight_datasets(models, access_token)
             update_config_file(successfully_downloaded, opt)
             return
         else:
-            print('** Cannot download models because no Hugging Face access token could be found. Please re-run without --yes')
-            return
+            return "could not download model weights from Huggingface due to missing or invalid access token"

     else:
         choice = user_wants_to_download_weights()
@@ -547,10 +565,13 @@ def download_weights(opt:dict):
             return

     print('** LICENSE AGREEMENT FOR WEIGHT FILES **')
+    # We are either already authenticated, or will be asked to provide the token interactively
     access_token = authenticate()
     print('\n** DOWNLOADING WEIGHTS **')
     successfully_downloaded = download_weight_datasets(models, access_token)
     update_config_file(successfully_downloaded, opt)
+    if len(successfully_downloaded) < len(models):
+        return "some of the model weights downloads were not successful"
#-------------------------------------
def get_root(root: str = None) -> str:
@@ -559,22 +580,7 @@ def get_root(root:str=None)->str:
     elif os.environ.get('INVOKEAI_ROOT'):
         return os.environ.get('INVOKEAI_ROOT')
     else:
-        init_file = os.path.expanduser(Globals.initfile)
-        if not os.path.exists(init_file):
-            return None
-
-        # if we get here, then we read from the initfile
-        root = None
-        with open(init_file, 'r') as infile:
-            lines = infile.readlines()
-            for l in lines:
-                if re.search('\s*#', l):  # ignore comments
-                    continue
-                match = re.search('--root\s*=?\s*"?([^"]+)"?', l)
-                if match:
-                    root = match.groups()[0]
-                    root = root.strip()
-        return root
+        return Globals.root
#-------------------------------------
def select_root(root: str, yes_to_all: bool = False):
@@ -601,22 +607,20 @@ def select_outputs(root:str,yes_to_all:bool=False):
 #-------------------------------------
 def initialize_rootdir(root: str, yes_to_all: bool = False):
-    assert os.path.exists('./configs'), 'Run this script from within the InvokeAI source code directory, "InvokeAI" or the runtime directory "invokeai".'
-
     print(f'** INITIALIZING INVOKEAI RUNTIME DIRECTORY **')
     root_selected = False
     while not root_selected:
         root = select_root(root, yes_to_all)
         outputs = select_outputs(root, yes_to_all)
         Globals.root = os.path.abspath(root)
         outputs = outputs if os.path.isabs(outputs) else os.path.abspath(os.path.join(Globals.root, outputs))

-        print(f'\nInvokeAI models and configuration files will be placed into "{root}" and image outputs will be placed into "{outputs}".')
+        print(f'\nInvokeAI image outputs will be placed into "{outputs}".')
         if not yes_to_all:
-            root_selected = yes_or_no('Accept these locations?')
+            root_selected = yes_or_no('Accept this location?')
         else:
             root_selected = True

-    print(f'\nYou may change the chosen directories at any time by editing the --root and --outdir options in "{Globals.initfile}",')
+    print(f'\nYou may change the chosen output directory at any time by editing the --outdir options in "{Globals.initfile}",')
     print(f'You may also change the runtime directory by setting the environment variable INVOKEAI_ROOT.\n')

     enable_safety_checker = True
@@ -630,6 +634,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
     print('It can be selectively enabled at run time with --nsfw_checker, and disabled with --no-nsfw_checker.')
     print('The following option will set whether the checker is enabled by default. Like other options, you can')
     print(f'change this setting later by editing the file {Globals.initfile}.')
+    print(f'The NSFW checker is a memory hog. If you have less than 6 GB of VRAM answer NO to this option.')
     enable_safety_checker = yes_or_no('Enable the NSFW checker by default?', enable_safety_checker)

     print('\nThe next choice selects the sampler to use by default. Samplers have different speed/performance')
@@ -658,7 +663,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
         shutil.copytree(src, dest, dirs_exist_ok=True)
     os.makedirs(outputs, exist_ok=True)

-    init_file = os.path.expanduser(Globals.initfile)
+    init_file = os.path.join(Globals.root, Globals.initfile)

     print(f'Creating the initialization file at "{init_file}".\n')
     with open(init_file, 'w') as f:
@@ -667,9 +672,6 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
 # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
 # or renaming it and then running configure_invokeai.py again.

-# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
---root="{Globals.root}"
-
 # the --outdir option controls the default location of image files.
 --outdir="{outputs}"

@@ -685,7 +687,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
 # -Ak_euler_a -C10.0
 #
 ''')
#-------------------------------------
class ProgressBar():
    def __init__(self, model_name='file'):
@@ -736,12 +738,15 @@ def main():

         # We check to see if the runtime directory is correctly initialized.
         if Globals.root == '' \
-           or not os.path.exists(os.path.join(Globals.root,'configs/stable-diffusion/v1-inference.yaml')):
+           or not os.path.exists(os.path.join(Globals.root,'invokeai.init')):
             initialize_rootdir(Globals.root, opt.yes_to_all)

+        # Optimistically try to download all required assets. If any errors occur, add them and proceed anyway.
+        errors = set()
+
         if opt.interactive:
             print('** DOWNLOADING DIFFUSION WEIGHTS **')
-            download_weights(opt)
+            errors.add(download_weights(opt))
         print('\n** DOWNLOADING SUPPORT MODELS **')
         download_bert()
         download_clip()
@@ -750,13 +755,13 @@ def main():
         download_codeformer()
         download_clipseg()
         download_safety_checker()
-        postscript()
+        postscript(errors=errors)
     except KeyboardInterrupt:
         print('\nGoodbye! Come back soon.')
     except Exception as e:
         print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
         print(traceback.format_exc())


#-------------------------------------
if __name__ == '__main__':
    main()
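The error accumulation rests on a small detail: download_weights() returns None on success, so the set may contain None, and any() treats None as falsy, which is why postscript()'s "if not any(errors)" still reports success. A standalone demonstration:

    errors = set()
    errors.add(None)       # a successful download_weights() call
    print(any(errors))     # False -> success message
    errors.add("some of the model weights downloads were not successful")
    print(any(errors))     # True  -> error branch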
2  setup.py
@@ -6,7 +6,7 @@ from setuptools import setup, find_packages
 def list_files(directory):
     return [os.path.join(directory, x) for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))]

-VERSION = '2.2.0'
+VERSION = '2.2.4'
 DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features'
                ' and options to aid the image generation process')
 LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an'
@@ -1,25 +0,0 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

# make the installer zip for linux and mac
rm -rf invokeAI
mkdir -p invokeAI
cp install.sh.in invokeAI/install.sh
chmod a+x invokeAI/install.sh
cp readme.txt invokeAI

zip -r invokeAI-src-installer-linux.zip invokeAI
zip -r invokeAI-src-installer-mac.zip invokeAI

# make the installer zip for windows
rm -rf invokeAI
mkdir -p invokeAI
cp install.bat.in invokeAI/install.bat
cp readme.txt invokeAI
cp WinLongPathsEnabled.reg invokeAI

zip -r invokeAI-src-installer-windows.zip invokeAI

rm -rf invokeAI
echo "The installer zips are ready to be distributed."
@@ -1,127 +0,0 @@
@echo off

@rem This script will install git and conda (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git and conda, this step will be skipped.

@rem Next, it'll checkout the project's git repo, if necessary.
@rem Finally, it'll create the conda environment and configure InvokeAI.

@rem This enables a user to install this project without manually installing conda and git.

@rem change to the script's directory
PUSHD "%~dp0"

echo "InvokeAI source installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
echo "<Press any key to start the install process>"
pause
echo ""

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set INSTALL_ENV_DIR=%cd%\installer_files\env
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
set umamba_exists=F
@rem Change the download URL to an InvokeAI repo's release URL

@rem figure out whether git and conda need to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

set PACKAGES_TO_INSTALL=

call conda --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T

@rem (if necessary) install git and conda into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
    @rem download micromamba
    if "%umamba_exists%" == "F" (
        echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"

        mkdir "%MAMBA_ROOT_PREFIX%"
        call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"

        @rem test the mamba binary
        echo Micromamba version:
        call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
    )

    @rem create the installer env
    if not exist "%INSTALL_ENV_DIR%" (
        call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
    )

    echo "Packages to install:%PACKAGES_TO_INSTALL%"

    call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

    if not exist "%INSTALL_ENV_DIR%" (
        echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
        pause
        exit /b
    )
)

set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

@rem get the repo (and load into the current directory)
if not exist ".git" (
    call git init
    call git config --local init.defaultBranch main
    call git remote add origin %REPO_URL%
    call git fetch
    call git checkout origin/main -ft
)

@rem activate the base env
call conda activate

@rem create the environment
call conda env remove -n invokeai
copy environments-and-requirements\environment-win-cuda.yml environment.yml
call conda env create
if "%ERRORLEVEL%" NEQ "0" (
    echo ""
    echo "Something went wrong while installing Python libraries and cannot continue."
    echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
    echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
    echo "installation methods"
    pause
    exit /b
)

copy source_installer\invoke.bat.in .\invoke.bat
copy source_installer\update.bat.in .\update.bat

call conda activate invokeai
@rem call configure script
call python scripts\configure_invokeai.py
if "%ERRORLEVEL%" NEQ "0" (
    echo ""
    echo "The configure script crashed or was cancelled."
    echo "InvokeAI is not ready to run. To run preload_models.py again,"
    echo "run the command 'update.bat' in this directory."
    echo "Press any key to continue"
    pause
    exit /b
)

@rem tell the user their next steps
echo ""
echo "* InvokeAI installed successfully *"
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)"
echo "Press any key to continue"
pause
exit /b
@@ -1,143 +0,0 @@
#!/usr/bin/env bash

# This script will install git and conda (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git and conda, this step will be skipped.

# Next, it'll checkout the project's git repo, if necessary.
# Finally, it'll create the conda environment and configure InvokeAI.

# This enables a user to install this project without manually installing conda and git.

cd "$(dirname "${BASH_SOURCE[0]}")"

echo "InvokeAI source installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
read -n 1 -s -r -p "<Press any key to start the install>"
echo ""

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*)  OS_NAME="linux";;
    Darwin*) OS_NAME="osx";;
    *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) OS_ARCH="64";;
    arm64*)  OS_ARCH="arm64";;
    *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac

# https://mamba.readthedocs.io/en/latest/installation.html
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi

# config
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
umamba_exists="F"

# figure out whether git and conda need to be installed
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

PACKAGES_TO_INSTALL=""
if ! $(which conda) -V &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
if ! which git &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi

if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi

# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
    # download micromamba
    if [ "$umamba_exists" == "F" ]; then
        echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"

        mkdir -p "$MAMBA_ROOT_PREFIX"
        curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > "$MAMBA_ROOT_PREFIX/micromamba"

        chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"

        # test the mamba binary
        echo "Micromamba version:"
        "$MAMBA_ROOT_PREFIX/micromamba" --version
    fi

    # create the installer env
    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        "$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
    fi

    echo "Packages to install:$PACKAGES_TO_INSTALL"

    "$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL

    if [ ! -e "$INSTALL_ENV_DIR" ]; then
        echo "There was a problem while initializing micromamba. Cannot continue."
        exit
    fi
fi

if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

# get the repo (and load into the current directory)
if [ ! -e ".git" ]; then
    git init
    git config --local init.defaultBranch main
    git remote add origin "$REPO_URL"
    git fetch
    git checkout origin/main -ft
fi

# create the environment
CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh"  # otherwise conda complains about 'shell not initialized' (needed when running in a script)

conda activate
if [ "$OS_NAME" == "osx" ]; then
    echo "macOS detected. Installing MPS and CPU support."
    ln -sf environments-and-requirements/environment-mac.yml environment.yml
else
    if (lsmod | grep amdgpu) &>/dev/null ; then
        echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
        ln -sf environments-and-requirements/environment-lin-amd.yml environment.yml
    else
        echo "Linux system detected. Installing CUDA and CPU support."
        ln -sf environments-and-requirements/environment-lin-cuda.yml environment.yml
    fi
fi
conda env update

status=$?

if test $status -ne 0
then
    echo "Something went wrong while installing Python libraries and cannot continue."
    echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
    echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
    echo "installation methods"
else
    ln -sf ./source_installer/invoke.sh.in ./invoke.sh
    ln -sf ./source_installer/update.sh.in ./update.sh

    conda activate invokeai
    # configure
    echo "Calling the configure_invokeai script"
    python scripts/configure_invokeai.py
    status=$?
    if test $status -ne 0
    then
        echo "The configure_invokeai.py script crashed or was cancelled."
        echo "InvokeAI is not ready to run. Try again by running"
        echo "update.sh in this directory."
    else
        # tell the user their next steps
        echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
    fi
fi

conda activate invokeai
@@ -1,29 +0,0 @@
@echo off

set INSTALL_ENV_DIR=%cd%\installer_files\env
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

call conda activate invokeai

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo 3. open the developer console
set /P restore="Please enter 1, 2 or 3: "
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line..
    python scripts\invoke.py
) ELSE IF /I "%restore%" == "2" (
    echo Starting the InvokeAI browser-based UI..
    python scripts\invoke.py --web
) ELSE IF /I "%restore%" == "3" (
    echo Developer Console
    call where python
    call python --version

    cmd /k
) ELSE (
    echo Invalid selection
    pause
    exit /b
)
@@ -1,16 +0,0 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
You may need to enable Windows Long Paths to install InvokeAI. If you're not
sure what this is, you almost certainly need to do this. Simply double-click the
"WinLongPathsEnabled.reg" file located in this directory, and approve the Windows
warnings. Note that you will need to have admin privileges in order to do this.

Then double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
@@ -1,19 +0,0 @@
@echo off

set INSTALL_ENV_DIR=%cd%\installer_files\env
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

@rem update the repo
if exist ".git" (
    call git pull
)

conda env update
conda activate invokeai
python scripts/preload_models.py

echo "Press any key to continue"
pause
exit 0
@@ -1,26 +0,0 @@
#!/bin/bash

INSTALL_ENV_DIR="$(pwd)/installer_files/env"
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

# update the repo
if [ -e ".git" ]; then
    git pull
fi

CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh"  # otherwise conda complains about 'shell not initialized' (needed when running in a script)

conda activate invokeai

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*)  conda env update;;
    Darwin*) conda env update -f environment-mac.yml;;
    *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

python scripts/preload_models.py
@@ -1 +0,0 @@
-banana sushi -Ak_lms -S42 -s10
@@ -1 +1,3 @@
 banana sushi -Ak_lms -S42 -s10
+banana sushi -Ak_lms -S42 -s5
+banana sushi -Ak_heun -S42 -s5
+banana sushi -Addim -S42 -s5