Compare commits

..

4 Commits

Author SHA1 Message Date
Lincoln Stein
0bbc83fe34 Bump up version to v2.3.0rc3
- Also changed the "latest" tag to "latest-2.3" to avoid interfering
  with the 2.2.5 update script, which fetches "latest"
2023-02-04 13:34:11 -05:00
Lincoln Stein
841e92e02a guesstimated disk space needed for "all" models 2023-02-04 13:32:34 -05:00
Lincoln Stein
4fbcdf2e28 adjust estimated file size reported by configure 2023-02-04 13:18:28 -05:00
Lincoln Stein
f744cf06e3 configuration script tidying up
- Rename configure_invokeai.py to invokeai_configure.py to be
  consistent with installed script name
- Remove warning message about half-precision models not being
  available during the model download process.
2023-02-04 12:00:17 -05:00
722 changed files with 35918 additions and 235144 deletions


@@ -1,6 +0,0 @@
[run]
omit='.env/*'
source='.'
[report]
show_missing = true


@@ -1,25 +1,20 @@
# use this file as a whitelist
*
!backend
!invokeai
!ldm
!pyproject.toml
!README.md
!scripts
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# ignore frontend but whitelist dist
invokeai/frontend/
!invokeai/frontend/dist/
# whitelist frontend, but ignore node_modules
invokeai/frontend/node_modules
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/
# Byte-compiled / optimized / DLL files
**/__pycache__/
# ignore python cache
**/__pycache__
**/*.py[cod]
# Distribution / packaging
*.egg-info/
*.egg
**/*.egg-info


@@ -1,8 +1,5 @@
root = true
# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
@@ -13,18 +10,3 @@ trim_trailing_whitespace = true
# Python
[*.py]
indent_size = 4
max_line_length = 120
# css
[*.css]
indent_size = 4
# flake8
[.flake8]
indent_size = 4
# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset

.flake8

@@ -1,37 +0,0 @@
[flake8]
max-line-length = 120
extend-ignore =
# See https://github.com/PyCQA/pycodestyle/issues/373
E203,
# use Bugbear's B950 instead
E501,
# from black repo https://github.com/psf/black/blob/main/.flake8
E266, W503, B907
extend-select =
# Bugbear line length
B950
extend-exclude =
scripts/orig_scripts/*
ldm/models/*
ldm/modules/*
ldm/data/*
ldm/generate.py
ldm/util.py
ldm/simplet2i.py
per-file-ignores =
# B950 line too long
# W605 invalid escape sequence
# F841 assigned to but never used
# F401 imported but unused
tests/test_prompt_parser.py: B950, W605, F401
tests/test_textual_inversion.py: F841, B950
# B023 Function definition does not bind loop variable
scripts/legacy_api.py: F401, B950, B023, F841
ldm/invoke/__init__.py: F401
# B010 Do not call setattr with a constant attribute value
ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double

.github/CODEOWNERS

@@ -1,61 +1,7 @@
# continuous integration
/.github/workflows/ @mauwii @lstein @blessedcoolant
# documentation
/docs/ @lstein @mauwii @blessedcoolant
mkdocs.yml @mauwii @lstein
# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein @blessedcoolant
/installer/ @ebr @lstein
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @blessedcoolant
invokeai/configs @lstein @ebr @blessedcoolant
/ldm/invoke/_version.py @lstein @blessedcoolant
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious
# generation and model management
/ldm/*.py @lstein @blessedcoolant
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein @blessedcoolant
/ldm/invoke/ckpt_generator @lstein @blessedcoolant
/ldm/invoke/CLI.py @lstein @blessedcoolant
/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein @blessedcoolant
/ldm/invoke/patchmatch.py @Kyle0654 @lstein
/ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn @blessedcoolant
/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
/ldm/modules/attention.py @damian0815 @keturn
/ldm/modules/diffusionmodules @damian0815 @keturn
/ldm/modules/distributions @damian0815 @keturn
/ldm/modules/ema.py @damian0815 @keturn
/ldm/modules/embedding_manager.py @lstein
/ldm/modules/encoders @damian0815 @keturn
/ldm/modules/image_degradation @damian0815 @keturn
/ldm/modules/losses @damian0815 @keturn
/ldm/modules/x_transformer.py @damian0815 @keturn
# Nodes
apps/ @Kyle0654 @jpphoto
# legacy REST API
# these are dead code
#/ldm/invoke/pngwriter.py @CapableWeb
#/ldm/invoke/server_legacy.py @CapableWeb
#/scripts/legacy_api.py @CapableWeb
#/tests/legacy_tests.sh @CapableWeb
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker/ @mauwii


@@ -3,19 +3,8 @@ on:
push:
branches:
- 'main'
- 'update/ci/docker/*'
- 'update/docker/*'
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
- 'docker/Dockerfile'
tags:
- 'v*.*.*'
workflow_dispatch:
jobs:
docker:
@@ -30,15 +19,18 @@ jobs:
include:
- flavor: amd
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cuda
pip-extra-index-url: ''
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
env:
PLATFORMS: 'linux/amd64,linux/arm64'
DOCKERFILE: 'docker/Dockerfile'
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -48,27 +40,23 @@ jobs:
uses: docker/metadata-action@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
images: |
ghcr.io/${{ github.repository }}
${{ vars.DOCKERHUB_REPOSITORY }}
images: ghcr.io/${{ github.repository }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha,enable=true,prefix=sha-,format=short
type=sha
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
suffix=${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ env.PLATFORMS }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
@@ -78,34 +66,25 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build container
id: docker_build
uses: docker/build-push-action@v4
uses: docker/build-push-action@v3
with:
context: .
file: ${{ env.DOCKERFILE }}
platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
file: ${{ matrix.dockerfile }}
platforms: ${{ matrix.platforms }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
type=gha,scope=main-${{ matrix.flavor }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Docker Hub Description
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
short-description: ${{ github.event.repository.description }}
- name: Output image, digest and metadata to summary
run: |
{
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
echo digest: "${{ steps.docker_build.outputs.digest }}"
echo labels: "${{ steps.meta.outputs.labels }}"
echo tags: "${{ steps.meta.outputs.tags }}"
echo version: "${{ steps.meta.outputs.version }}"
} >> "$GITHUB_STEP_SUMMARY"


@@ -9,10 +9,6 @@ jobs:
mkdocs-material:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
env:
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
REPO_NAME: '${{ github.repository }}'
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
steps:
- name: checkout sources
uses: actions/checkout@v3
@@ -23,15 +19,11 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: '3.10'
cache: pip
cache-dependency-path: pyproject.toml
- name: install requirements
env:
PIP_USE_PEP517: 1
run: |
python -m \
pip install ".[docs]"
pip install -r docs/requirements-mkdocs.txt
- name: confirm buildability
run: |


@@ -1,41 +0,0 @@
name: PyPI Release
on:
push:
paths:
- 'ldm/invoke/_version.py'
workflow_dispatch:
jobs:
release:
if: github.repository == 'invoke-ai/InvokeAI'
runs-on: ubuntu-22.04
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
TWINE_NON_INTERACTIVE: 1
steps:
- name: checkout sources
uses: actions/checkout@v3
- name: install deps
run: pip install --upgrade build twine
- name: build package
run: python3 -m build
- name: check distribution
run: twine check dist/*
- name: check PyPI versions
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
run: |
pip install --upgrade requests
python -c "\
import scripts.pypi_helper; \
EXISTS=scripts.pypi_helper.local_on_pypi(); \
print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
- name: upload package
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
run: twine upload dist/*


@@ -1,67 +0,0 @@
name: Test invoke.py pip
on:
pull_request:
paths-ignore:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
python-version:
# - '3.9'
- '3.10'
pytorch:
# - linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
# - windows-cuda-11_6
# - windows-cuda-11_7
include:
# - pytorch: linux-cuda-11_6
# os: ubuntu-22.04
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_6
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_7
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
steps:
- run: 'echo "No build required"'


@@ -3,29 +3,15 @@ on:
push:
branches:
- 'main'
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
pull_request:
paths:
- 'pyproject.toml'
- 'ldm/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/dist/**'
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
@@ -76,13 +62,28 @@ jobs:
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
PIP_USE_PEP517: '1'
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Set Cache-Directory Windows
if: runner.os == 'Windows'
id: set-cache-dir-windows
run: |
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
- name: Set Cache-Directory others
if: runner.os != 'Windows'
id: set-cache-dir-others
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
@@ -91,29 +92,26 @@ jobs:
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: pip
cache-dependency-path: pyproject.toml
- name: install invokeai
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
run: >
pip3 install
--use-pep517
--editable=".[test]"
- name: run pytest
id: run-pytest
run: pytest
- name: set INVOKEAI_OUTDIR
run: >
python -c
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
>> ${{ matrix.github-env }}
- name: Use Cached models
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-models
with:
path: ${{ env.CACHE_DIR }}
key: ${{ env.cache-name }}
enableCrossOsArchive: true
- name: run invokeai-configure
id: run-preload-models
@@ -126,8 +124,9 @@ jobs:
--full-precision
# can't use fp16 weights without a GPU
- name: run invokeai
id: run-invokeai
- name: Run the tests
if: runner.os != 'Windows'
id: run-tests
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
@@ -138,11 +137,10 @@ jobs:
--no-patchmatch
--no-nsfw_checker
--from_file ${{ env.TEST_PROMPTS }}
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
- name: Archive results
id: archive-results
uses: actions/upload-artifact@v3
with:
name: results
path: ${{ env.INVOKEAI_OUTDIR }}
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs

.gitignore

@@ -1,5 +1,4 @@
# ignore default image save location and model symbolic link
.idea/
embeddings/
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
@@ -68,7 +67,6 @@ htmlcov/
.cache
nosetests.xml
coverage.xml
cov.xml
*.cover
*.py,cover
.hypothesis/
@@ -234,4 +232,4 @@ installer/update.bat
installer/update.sh
# no longer stored in source directory
models
models


@@ -1,41 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
additional_dependencies:
- flake8-black
- flake8-bugbear
- flake8-comprehensions
- flake8-simplify
- repo: https://github.com/pre-commit/mirrors-prettier
rev: 'v3.0.0-alpha.4'
hooks:
- id: prettier
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: end-of-file-fixer
- id: no-commit-to-branch
args: ['--branch', 'main']
- id: trailing-whitespace
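The configuration above (removed in this compare) wires the repository into the pre-commit framework. As an illustration only, and not something taken from this diff, the standard pre-commit CLI exercises such a config like this:

```sh
# Install the framework, register the hooks, and lint the whole tree once
pip install pre-commit
pre-commit install          # runs the configured hooks on every `git commit`
pre-commit run --all-files  # one-off run of black, isort, flake8, prettier, etc.
```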


@@ -1,14 +0,0 @@
invokeai/frontend/.husky
invokeai/frontend/patches
# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist
# Ignore all HTML files:
*.html
# Ignore deprecated docs
docs/installation/deprecated_documentation


@@ -1,9 +1,9 @@
embeddedLanguageFormatting: auto
endOfLine: lf
singleQuote: true
semi: true
trailingComma: es5
tabWidth: 2
useTabs: false
singleQuote: true
quoteProps: as-needed
embeddedLanguageFormatting: auto
overrides:
- files: '*.md'
options:
@@ -11,9 +11,3 @@ overrides:
printWidth: 80
parser: markdown
cursorOffset: -1
- files: docs/**/*.md
options:
tabWidth: 4
- files: 'invokeai/frontend/public/locales/*.json'
options:
tabWidth: 4


@@ -1,5 +0,0 @@
[pytest]
DJANGO_SETTINGS_MODULE = webtas.settings
; python_files = tests.py test_*.py *_tests.py
addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml

README.md

@@ -1,6 +1,6 @@
<div align="center">
![project logo](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
# InvokeAI: A Stable Diffusion Toolkit
@@ -10,10 +10,10 @@
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@@ -28,14 +28,12 @@
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
[translation status link]: https://hosted.weblate.org/engage/invokeai/
</div>
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
@@ -43,136 +41,38 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
<div align="center">
![canvas preview](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/canvas_preview.png)
![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)
</div>
## Table of Contents
1. [Quick Start](#getting-started-with-invokeai)
2. [Installation](#detailed-installation-instructions)
3. [Hardware Requirements](#hardware-requirements)
4. [Features](#features)
5. [Latest Changes](#latest-changes)
6. [Troubleshooting](#troubleshooting)
7. [Contributing](#contributing)
8. [Contributors](#contributors)
9. [Support](#support)
10. [Further Reading](#further-reading)
## Getting Started with InvokeAI
# Getting Started with InvokeAI
For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
### Automatic Installer (suggested for 1st time users)
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`
4. If you are on Windows, double-click on the `install.bat` script. On
macOS, open a Terminal window, drag the file `install.sh` from Finder
into the Terminal, and press return. On Linux, run `install.sh`.
5. You'll be asked to confirm the location of the folder in which
to install InvokeAI and its image generation model files. Pick a
location with at least 15 GB of free memory. More if you plan on
installing lots of models.
## Table of Contents
6. Wait while the installer does its thing. After installing the software,
the installer will launch a script that lets you configure InvokeAI and
select a set of starting image generation models.
1. [Installation](#installation)
2. [Hardware Requirements](#hardware-requirements)
3. [Features](#features)
4. [Latest Changes](#latest-changes)
5. [Troubleshooting](#troubleshooting)
6. [Contributing](#contributing)
7. [Contributors](#contributors)
8. [Support](#support)
9. [Further Reading](#further-reading)
7. Find the folder that InvokeAI was installed into (it is not the
same as the unpacked zip file directory!) The default location of this
folder (if you didn't change it in step 5) is `~/invokeai` on
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
8. On Windows systems, double-click on the `invoke.bat` file. On
macOS, open a Terminal window, drag `invoke.sh` from the folder into
the Terminal, and press return. On Linux, run `invoke.sh`
9. Press 2 to open the "browser-based UI", press enter/return, wait a
minute or two for Stable Diffusion to start up, then open your browser
and go to http://localhost:9090.
10. Type `banana sushi` in the box on the top left and click `Invoke`
### Command-Line Installation (for users familiar with Terminals)
You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
not supported.
1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
```terminal
mkdir invokeai
```
3. Create a virtual environment named `.venv` inside this directory and activate it:
```terminal
cd invokeai
python -m venv .venv --prompt InvokeAI
```
4. Activate the virtual environment (do it every time you run InvokeAI)
_For Linux/Mac users:_
```sh
source .venv/bin/activate
```
_For Windows users:_
```ps
.venv\Scripts\activate
```
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
_For Windows/Linux with an NVIDIA GPU:_
```terminal
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
_For Linux with an AMD GPU:_
```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```
_For Macintoshes, either Intel or M1/M2:_
```sh
pip install InvokeAI --use-pep517
```
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
```terminal
invokeai-configure
```
7. Launch the web server (do it every time you run InvokeAI):
```terminal
invokeai --web
```
8. Point your browser to http://localhost:9090 to bring up the web interface.
9. Type `banana sushi` in the box on the top left and click `Invoke`.
Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
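A minimal relaunch sketch for Linux/macOS, condensing steps 4, 7 and 8 above (it assumes the `invokeai` directory created in step 2):

```sh
cd invokeai
source .venv/bin/activate   # step 4: activate the virtual environment
invokeai --web              # step 7: start the web server
# step 8: open http://localhost:9090 in your browser
```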
### Detailed Installation Instructions
## Installation
This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
@@ -180,29 +80,28 @@ AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
## Hardware Requirements
### Hardware Requirements
InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
### System
#### System
You will need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4GB or more VRAM memory. (Linux only)
We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.
### Memory
#### Memory
- At least 12 GB Main Memory RAM.
### Disk
#### Disk
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
@@ -252,15 +151,13 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.
## Contributing
# Contributing
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
@@ -277,8 +174,6 @@ This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
their time, hard work and effort.
Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.
### Support
For support, please use this repository's GitHub Issues tracking service, or join the Discord.



@@ -0,0 +1,164 @@
@echo off
@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git, this step will be skipped.
@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.
@rem This enables a user to install this project without manually installing git or Python
@rem change to the script's directory
PUSHD "%~dp0"
set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
set "no_cache_dir="
)
echo ***** Installing InvokeAI.. *****
@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
set PACKAGES_TO_INSTALL=
call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
@rem Cleanup
del /q .tmp1 .tmp2
@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@rem download micromamba
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
@rem test the mamba binary
echo ***** Micromamba version: *****
call micromamba.exe --version
@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
)
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
if not exist "%INSTALL_ENV_DIR%" (
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
pause
exit /b
)
)
del /q micromamba.exe
@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
del /q InvokeAI.tgz
set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..
@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix
echo ***** Unpacked InvokeAI source *****
@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit
del /q python.tgz
echo ***** Unpacked python-build-standalone *****
@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
if %errorlevel% neq 0 goto err_exit
call .venv\Scripts\activate.bat
echo ***** Created Python virtual environment *****
@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit
set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
if %errorlevel% neq 0 goto err_exit
echo ***** Updated pip and wheel *****
set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit
set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit
echo ***** Installed Python dependencies *****
set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit
copy binary_installer\invoke.bat.in .\invoke.bat
echo ***** Installed invoke launcher script ******
@rem more cleanup
rd /s /q binary_installer installer_files
@rem preload the models
call .venv\Scripts\python scripts\configure_invokeai.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit
deactivate
echo ***** Finished downloading models *****
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
pause
exit
:err_exit
echo %err_msg%
pause
exit


@@ -0,0 +1,235 @@
#!/usr/bin/env bash
# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"
set -euo pipefail
IFS=$'\n\t'
function _err_exit {
if test "$1" -ne 0
then
echo -e "Error code $1; Error caught was '$2'"
read -p "Press any key to exit..."
exit
fi
}
# This script will install git (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git, this step will be skipped.
# Next, it'll download the project's source code.
# Then it will download a self-contained, standalone Python and unpack it.
# Finally, it'll create the Python virtual environment and preload the models.
# This enables a user to install this project without manually installing git or Python
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
export no_cache_dir="--no-cache-dir"
if [ $# -ge 1 ]; then
if [ "$1" = "use-cache" ]; then
export no_cache_dir=""
fi
fi
OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="darwin";;
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
esac
OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) ;;
arm64*) ;;
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
esac
# https://mamba.readthedocs.io/en/latest/installation.html
MAMBA_OS_NAME=$OS_NAME
MAMBA_ARCH=$OS_ARCH
if [ "$OS_NAME" == "darwin" ]; then
MAMBA_OS_NAME="osx"
fi
if [ "$OS_ARCH" == "linux" ]; then
MAMBA_ARCH="aarch64"
fi
if [ "$OS_ARCH" == "x86_64" ]; then
MAMBA_ARCH="64"
fi
PY_ARCH=$OS_ARCH
if [ "$OS_ARCH" == "arm64" ]; then
PY_ARCH="aarch64"
fi
# Compute device ('cd' segment of reqs files) detect goes here
# This needs a ton of work
# Suggestions:
# - lspci
# - check $PATH for nvidia-smi, get CUDA/GPU version from output
# - Surely there's a similar utility for AMD?
CD="cuda"
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
CD="mps"
fi
# config
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
if [ "$OS_NAME" == "darwin" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
elif [ "$OS_NAME" == "linux" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
fi
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
PACKAGES_TO_INSTALL=""
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
# download micromamba
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
chmod u+x ./micromamba
# test the mamba binary
echo -e "\n***** Micromamba version: *****\n"
./micromamba --version
# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
fi
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
if [ ! -e "$INSTALL_ENV_DIR" ]; then
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
exit
fi
fi
rm -f micromamba.exe
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
# Download/unpack/clean up InvokeAI release sourceball
_err_msg="\n----- InvokeAI source download failed -----\n"
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? _err_msg
_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? _err_msg
rm -f InvokeAI.tgz
_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? _err_msg
cd ..
# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
echo -e "\n***** Unpacked InvokeAI source *****\n"
# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? _err_msg
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? _err_msg
rm -f python.tgz
echo -e "\n***** Unpacked python-build-standalone *****\n"
# create venv
_err_msg="\n----- problem creating venv -----\n"
if [ "$OS_NAME" == "darwin" ]; then
# patch sysconfig so that extensions can build properly
# adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
PYTHON_INSTALL_DIR="$(pwd)/python"
SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
TMPFILE="$(mktemp)"
chmod +w "${SYSCONFIG}"
cp "${SYSCONFIG}" "${TMPFILE}"
sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
rm -f "${TMPFILE}"
fi
./python/bin/python3 -E -s -m venv .venv
_err_exit $? _err_msg
source .venv/bin/activate
echo -e "\n***** Created Python virtual environment *****\n"
# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? _err_msg
_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
_err_exit $? _err_msg
echo -e "\n***** Updated pip *****\n"
_err_msg="\n----- requirements file copy failed -----\n"
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? _err_msg
_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
_err_exit $? _err_msg
echo -e "\n***** Installed Python dependencies *****\n"
_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
_err_exit $? _err_msg
echo -e "\n***** Installed InvokeAI *****\n"
cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script ******\n"
# more cleanup
rm -rf binary_installer/ installer_files/
# preload the models
.venv/bin/python3 scripts/configure_invokeai.py
_err_msg="\n----- model download clone failed -----\n"
_err_exit $? _err_msg
deactivate
echo -e "\n***** Finished downloading models *****\n"
echo "All done! Run the command"
echo " $scriptdir/invoke.sh"
echo "to start InvokeAI."
read -p "Press any key to exit..."
exit


@@ -0,0 +1,36 @@
@echo off
PUSHD "%~dp0"
call .venv\Scripts\activate.bat
echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
echo Starting the InvokeAI command-line.
.venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
echo Starting the InvokeAI browser-based UI.
.venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
echo Developer Console
echo Python command is:
where python
echo Python version is:
python --version
echo *************************
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
echo so that you can troubleshoot this InvokeAI installation as necessary.
echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k
) else (
echo Invalid selection
pause
exit /b
)
deactivate


@@ -0,0 +1,46 @@
#!/usr/bin/env sh
set -eu
. .venv/bin/activate
# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice
case $choice in
1)
printf "\nStarting the InvokeAI command-line..\n";
.venv/bin/python scripts/invoke.py $*;
;;
2)
printf "\nStarting the InvokeAI browser-based UI..\n";
.venv/bin/python scripts/invoke.py --web $*;
;;
3)
printf "\nDeveloper Console:\n";
printf "Python command is:\n\t";
which python;
printf "Python version is:\n\t";
python --version;
echo "*************************"
echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
echo "so that you can troubleshoot this InvokeAI installation as necessary.";
printf "*************************\n"
echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
/usr/bin/env "$SHELL";
;;
*)
echo "Invalid selection";
exit
;;
esac

File diff suppressed because it is too large (4 files)


@@ -0,0 +1,17 @@
InvokeAI
Project homepage: https://github.com/invoke-ai/InvokeAI
Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.


@@ -0,0 +1,33 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.15
albumentations
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torch==1.12.1 ; platform_system == 'Darwin'
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip


@@ -1,18 +1,14 @@
# syntax=docker/dockerfile:1
# Maintained by Matthias Wild <mauwii@outlook.de>
ARG PYTHON_VERSION=3.9
##################
## base image ##
### base image ###
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
# Install necessary packages
# Install necesarry packages
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,23 +17,40 @@ RUN \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.*
libopencv-dev=4.5.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# set working directory and env
# set working directory and path
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# don't fall back to legacy build system
ENV PIP_USE_PEP517=1
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
#######################
## build pyproject ##
#######################
######################
### build frontend ###
######################
FROM node:lts as frontend-builder
# Copy Sources
ARG APPDIR=/usr/src
WORKDIR ${APPDIR}
COPY --link . .
# install dependencies and build frontend
WORKDIR ${APPDIR}/invokeai/frontend
RUN \
--mount=type=cache,target=/usr/local/share/.cache/yarn/v6 \
yarn install \
--prefer-offline \
--frozen-lockfile \
--non-interactive \
--production=false \
&& yarn build
###################################
### install python dependencies ###
###################################
FROM python-base AS pyproject-builder
# Install dependencies
@@ -47,57 +60,53 @@ RUN \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.*
# prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}
python3-dev=3.9.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
python3 -m venv "${APPNAME}" \
RUN python3 -m venv "${APPNAME}" \
--upgrade-deps
# copy sources
COPY --link . .
COPY --from=frontend-builder ${APPDIR} .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
"${APPNAME}/bin/pip" install .
# build patchmatch
RUN python3 -c "from patchmatch import patch_match"
RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
"${APPDIR}/${APPNAME}/bin/pip" install \
--use-pep517 \
.
#####################
## runtime image ##
### runtime image ###
#####################
FROM python-base AS runtime
# Create a new user
ARG UNAME=appuser
RUN useradd \
--no-log-init \
-m \
-U \
"${UNAME}"
# setup environment
COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
&& chown -R "${UNAME}" "${VOLUME_DIR}"
# build patchmatch
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
&& PYTHONDONTWRITEBYTECODE=1 \
python3 -c "from patchmatch import patch_match" \
&& apt-get remove -y \
--autoremove \
build-essential \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/*
# setup runtime environment
USER ${UNAME}
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
# set Entrypoint and default CMD
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]


@@ -1,51 +1,43 @@
#!/usr/bin/env bash
set -e
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
echo -e "Dockerfile: \t${DOCKERFILE}"
echo -e "index-url: \t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename: \t${VOLUMENAME}"
echo -e "Platform: \t${PLATFORM}"
echo -e "Registry: \t${CONTAINER_REGISTRY}"
echo -e "Repository: \t${CONTAINER_REPOSITORY}"
echo -e "Container Tag: \t${CONTAINER_TAG}"
echo -e "Container Image: ${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "creating docker volume "
echo -n "createing docker volume "
docker volume create "${VOLUMENAME}"
fi
# Build Container
DOCKER_BUILDKIT=1 docker build \
--platform="${PLATFORM:-linux/amd64}" \
--tag="${CONTAINER_IMAGE:-invokeai}" \
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
docker build \
--platform="${PLATFORM}" \
--tag="${CONTAINER_IMAGE}" \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
..
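As the header comments of this script point out, the container flavor can be pinned instead of auto-detected; a usage sketch based on those comments:

```sh
# Build and run a specific flavor; pass the same value to run.sh afterwards
CONTAINER_FLAVOR=cpu ./build.sh
CONTAINER_FLAVOR=cpu ./run.sh
```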


@@ -1,51 +1,35 @@
#!/usr/bin/env bash
# This file is used to set environment variables for the build.sh and run.sh scripts.
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Activate virtual environment if not already activated and exists
if [[ -z $VIRTUAL_ENV ]]; then
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
&& echo "Activated virtual environment: $VIRTUAL_ENV"
fi
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
if [[ -z "$CONTAINER_FLAVOR" ]]; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR=cuda
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/rocm"}"
elif CONTAINER_FLAVOR=cpu; then
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/cpu"}"
fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"


@@ -1,16 +1,14 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -24,18 +22,10 @@ docker run \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="${VOLUMENAME}",target=/data \
--mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${@:+$@}
# Remove Trash folder
for f in outputs/.Trash*; do
if [ -e "$f" ]; then
rm -Rf "$f"
break
fi
done
"${CONTAINER_IMAGE}" ${1:+$@}


@@ -1,5 +0,0 @@
{
"MD046": false,
"MD007": false,
"MD030": false
}


@@ -4,7 +4,7 @@ title: Changelog
# :octicons-log-16: **Changelog**
## v2.3.0 <small>(15 January 2023)</small>
## v2.3.0 <small>(15 January 2023)</small>
**Transition to diffusers
@@ -44,7 +44,7 @@ introduces several changes you should know about.
A configuration stanza for a diffusers model stored locally should
look like this, with a `format` of `diffusers`, but a `path` field
that points at the directory that contains `model_index.json`:
```
waifu-diffusion:
description: Latest waifu diffusion 1.4
@@ -94,7 +94,7 @@ introduces several changes you should know about.
!import_model /opt/sd-models/sd-1.4.ckpt
!import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```
**KNOWN BUGS (15 January 2023)
1. On CUDA systems, the 768 pixel stable-diffusion-2.0 and
@@ -261,7 +261,7 @@ sections describe what's new for InvokeAI.
[Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
PIP-only installs. See
[Manual Installation](installation/020_INSTALL_MANUAL.md).
[Manual Installation](installation/INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
sampler, etc) in a `.invokeai` file. See
[Client](features/CLI.md)

Binary files not shown (ten documentation images, 26–128 KiB each, removed in this diff).

View File

@@ -1,93 +0,0 @@
# Invoke.AI Architecture
```mermaid
flowchart TB
subgraph apps[Applications]
webui[WebUI]
cli[CLI]
subgraph webapi[Web API]
api[HTTP API]
sio[Socket.IO]
end
end
subgraph invoke[Invoke]
direction LR
invoker
services
sessions
invocations
end
subgraph core[AI Core]
Generate
end
webui --> webapi
webapi --> invoke
cli --> invoke
invoker --> services & sessions
invocations --> services
sessions --> invocations
services --> core
%% Styles
classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A
class apps,webapi,invoke,core sg
```
## Applications
Applications are built on top of the invoke framework. They should construct `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.
### Web UI
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
| Component | Description |
| --- | --- |
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
| routers | API definitions for different areas of API functionality |
### CLI
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
## Invoke
The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.
### Invoker
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
### Sessions
Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add an entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations cannot be deleted or modified once added.
The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.
### Invocations
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
### Services
Services provide invocations with access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
## AI Core
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).

View File

@@ -1,105 +0,0 @@
# Invocations
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
## Creating a new invocation
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
An invocation looks like this:
```py
class UpscaleInvocation(BaseInvocation):
"""Upscales an image."""
type: Literal['upscale'] = 'upscale'
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description = "The upscale level")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image.image_type, self.image.image_name)
results = context.services.generate.upscale_and_reconstruct(
image_list = [[image, 0]],
upscale = (self.level, self.strength),
strength = 0.0, # GFPGAN strength
save_original = False,
image_callback = None,
)
# Results are image and seed, unwrap for now
# TODO: can this return multiple results?
image_type = ImageType.RESULT
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image = ImageField(image_type = image_type, image_name = image_name)
)
```
Each portion is important to implement correctly.
### Class definition and type
```py
class UpscaleInvocation(BaseInvocation):
"""Upscales an image."""
type: Literal['upscale'] = 'upscale'
```
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
### Inputs
```py
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description="The upscale level")
```
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:
| Part | Value | Description |
| ---- | ----- | ----------- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.
The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).
Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation's output by matching its name and type.
### Invoke Function
```py
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image.image_type, self.image.image_name)
results = context.services.generate.upscale_and_reconstruct(
image_list = [[image, 0]],
upscale = (self.level, self.strength),
strength = 0.0, # GFPGAN strength
save_original = False,
image_callback = None,
)
# Results are image and seed, unwrap for now
image_type = ImageType.RESULT
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image = ImageField(image_type = image_type, image_name = image_name)
)
```
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.
Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).
Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
### Outputs
```py
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
type: Literal['image'] = 'image'
image: ImageField = Field(default=None, description="The output image")
```
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.

View File

@@ -6,51 +6,38 @@ title: Command-Line Interface
## **Interactive Command Line Interface**
The InvokeAI command line interface (CLI) provides scriptable access
to InvokeAI's features. Some advanced features are only available
through the CLI, though they eventually find their way into the WebUI.
The `invoke.py` script, located in `scripts/`, provides an interactive interface
to image generation similar to the "invoke mothership" bot that Stable AI
provided on its Discord server.
The CLI is accessible from the `invoke.sh`/`invoke.bat` launcher by
selecting option (1). Alternatively, it can be launched directly from
the command line by activating the InvokeAI environment and giving the
command:
```bash
invokeai
```
After some startup messages, you will be presented with the `invoke> `
prompt. Here you can type prompts to generate images and issue other
commands to load and manipulate generative models. The CLI has a large
number of command-line options that control its behavior. To get a
concise summary of the options, call `invokeai` with the `--help` argument:
```bash
invokeai --help
```
Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) source
code repository, the time-consuming initialization of the AI model
only happens once. After that, image generation from the
command-line interface is very fast.
The script uses the readline library to allow for in-line editing, command
history (++up++ and ++down++), autocompletion, and more. To help keep track of
which prompts generated which images, the script writes a log file of image
names and prompts to the selected output directory.
Here is a typical session
In addition, as of version 1.02, it also writes the prompt into the PNG file's
metadata where it can be retrieved using `scripts/images2prompt.py`
The script is confirmed to work on Linux, Windows and Mac systems.
!!! note
This script runs from the command-line or can be used as a Web application. The Web GUI is
currently rudimentary, but a much better replacement is on its way.
```bash
PS1:C:\Users\fred> invokeai
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
* Initializing, be patient...
* Initializing, be patient...
>> Initialization file /home/lstein/invokeai/invokeai.init found. Loading...
>> Internet connectivity is True
>> InvokeAI, version 2.3.0-rc5
>> InvokeAI runtime directory is "/home/lstein/invokeai"
>> GFPGAN Initialized
>> CodeFormer Initialized
>> ESRGAN Initialized
>> Using device_type cuda
>> xformers memory-efficient attention is available and enabled
(...more initialization messages...)
* Initialization done! Awaiting your command (-h for help, 'q' to quit)
Loading model from models/ldm/text2img-large/model.ckpt
(...more initialization messages...)
* Initialization done! Awaiting your command...
invoke> ashley judd riding a camel -n2 -s150
Outputs:
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
@@ -60,15 +47,27 @@ invoke> "there's a fly in my soup" -n6 -g
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
invoke> q
# this shows how to retrieve the prompt stored in the saved image's metadata
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
```
![invoke-py-demo](../assets/dream-py-demo.png)
The `invoke>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you
do). A significant change is that creation of individual images is now the
default unless `--grid` (`-g`) is given. A full list is given in
[List of prompt arguments](#list-of-prompt-arguments).
## Arguments
The script recognizes a series of command-line switches that will
change important global defaults, such as the directory for image
outputs and the location of the model weight files.
The script itself also recognizes a series of command-line switches that will
change important global defaults, such as the directory for image outputs and
the location of the model weight files.
### List of arguments recognized at the command line
@@ -83,14 +82,10 @@ overridden on a per-prompt basis (see
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
| `--model <modelname>` | | `stable-diffusion-1.5` | Loads the initial model specified in configs/models.yaml. |
| `--ckpt_convert ` | | `False` | If provided, both .ckpt and .safetensors files will be auto-converted into diffusers format in memory |
| `--autoconvert <path>` | | `None` | On startup, scan the indicated directory for new .ckpt/.safetensor files and automatically convert and import them |
| `--precision` | | `fp16` | Provide `fp32` for full precision mode, `fp16` for half-precision. `fp32` needed for Macintoshes and some NVidia cards. |
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
| `--patchmatch`, `--no-patchmatch` | | `--patchmatch` | Load/Don't load the PatchMatch inpainting extension |
| `--xformers`, `--no-xformers` | | `--xformers` | Load/Don't load the Xformers memory-efficient attention module (CUDA only) |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
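Several of these switches are commonly combined on a single launch line. A hypothetical example using flags from the table above:

```bash
invokeai --web --host 0.0.0.0 --port 9090 \
  --outdir ~/invokeai/outputs \
  --model stable-diffusion-1.5 \
  --precision fp16 --xformers
```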
@@ -114,7 +109,6 @@ overridden on a per-prompt basis (see
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--full_precision` | | `False` | Same as `--precision=fp32`|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
@@ -214,8 +208,6 @@ Here are the invoke> commands that apply to txt2img:
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
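For example, a hypothetical prompt that applies horizontal symmetry halfway through generation (the prompt text is only an illustration):

```bash
invoke> "a symmetrical stained glass window" -s50 --h_symmetry_time_pct 0.5
```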
!!! note
@@ -344,10 +336,8 @@ useful for debugging the text masking process prior to inpainting with the
### Model selection and importation
The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script. There are several
different model formats, each described in the [Model Installation
Guide](../installation/050_INSTALLING_MODELS.md).
The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.
#### `!models`
@@ -357,9 +347,9 @@ model is bold-faced
Example:
<pre>
inpainting-1.5 not loaded Stable Diffusion inpainting model
<b>stable-diffusion-1.5 active Stable Diffusion v1.5</b>
waifu-diffusion not loaded Waifu Diffusion v1.4
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>
#### `!switch <model>`
@@ -371,30 +361,43 @@ Note how the second column of the `!models` table changes to `cached` after a
model is first loaded, and that the long initialization step is not needed when
loading a cached model.
#### `!import_model <hugging_face_repo_ID>`
<pre>
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
waifu-diffusion active Waifu Diffusion v1.3
This imports and installs a `diffusers`-style model that is stored on
the [HuggingFace Web Site](https://huggingface.co). You can look up
any [Stable Diffusion diffusers
model](https://huggingface.co/models?library=diffusers) and install it
with a command like the following:
invoke> !switch waifu-diffusion
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Setting Sampler to k_lms
```bash
!import_model prompthero/openjourney
```
invoke> !models
laion400m not loaded <no description>
stable-diffusion-1.4 cached Stable Diffusion v1.4
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
#### `!import_model <path/to/diffusers/directory>`
invoke> !switch stable-diffusion-1.4
>> Caching model waifu-diffusion in system RAM
>> Retrieving model stable-diffusion-1.4 from system RAM cache
>> Setting Sampler to k_lms
If you have a copy of a `diffusers`-style model saved to disk, you can
import it by passing the path to the model's top-level directory.
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>
#### `!import_model <url>`
For a `.ckpt` or `.safetensors` file, if you have a direct download
URL for the file, you can provide it to `!import_model` and the file
will be downloaded and installed for you.
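For example, using a direct download URL (this particular URL is only an illustration):

```bash
invoke> !import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```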
#### `!import_model <path/to/model/weights.ckpt>`
#### `!import_model <path/to/model/weights>`
This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
@@ -414,12 +417,35 @@ below, the bold-faced text shows what the user typed in with the exception of
the width, height and configuration file paths, which were filled in
automatically.
#### `!import_model <path/to/directory_of_models>`
Example:
If you provide the path of a directory that contains one or more
`.ckpt` or `.safetensors` files, the CLI will scan the directory and
interactively offer to import the models it finds there. Also see the
`--autoconvert` command-line option.
<pre>
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:
Name for this model: <b>waifu-diffusion</b>
Description of this model: <b>Waifu Diffusion v1.3</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
waifu-diffusion:
config: configs/stable-diffusion/v1-inference.yaml
description: Waifu Diffusion v1.3
height: 512
weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
</pre>
#### `!edit_model <name_of_model>`
@@ -453,6 +479,11 @@ OK to import [n]? y
...
</pre>
<pre>
invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
...lots of text...
Outputs:
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
</pre>
### History processing
The CLI provides a series of convenient commands for reviewing previous actions,

View File

@@ -1,5 +1,5 @@
---
title: Styles and Subjects
title: Concepts Library
---
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
@@ -25,14 +25,10 @@ library which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.
You may also be interested in using [LoRA Models](LORAS.md) to
generate images with specialized styles and subjects.
### An Example
Here are a few examples to illustrate how Textual Inversion works. All
these images were generated using the command-line client and the
Stable Diffusion 1.5 model:
Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:
| Japanese gardener | Japanese gardener &lt;ghibli-face&gt; | Japanese gardener &lt;hoi4-leaders&gt; | Japanese gardener &lt;cartoona-animals&gt; |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
@@ -113,50 +109,21 @@ For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
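As a sketch, manually installing a downloaded TI file might look like this (the subdirectory name is arbitrary and the root directory is assumed to be `~/invokeai`):

```bash
# Give each embedding its own subdirectory so identically-named files stay distinct.
mkdir -p ~/invokeai/embeddings/anime-background-style-v2
cp ~/Downloads/learned_embeds.bin ~/invokeai/embeddings/anime-background-style-v2/
```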
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see messages similar to these:
files it finds there. At startup you will see a message similar to this one:
```bash
>> Loading embeddings from /data/lstein/invokeai-2.3/embeddings
| Loading v1 embedding file: style-hamunaptra
| Loading v4 embedding file: embeddings/learned_embeds-steps-500.bin
| Loading v2 embedding file: lfa
| Loading v3 embedding file: easynegative
| Loading v1 embedding file: rem_rezero
| Loading v2 embedding file: midj-strong
| Loading v4 embedding file: anime-background-style-v2/learned_embeds.bin
| Loading v4 embedding file: kamon-style/learned_embeds.bin
** Notice: kamon-style/learned_embeds.bin was trained on a model with an incompatible token dimension: 768 vs 1024.
>> Textual inversion triggers: <anime-background-style-v2>, <easynegative>, <lfa>, <midj-strong>, <milo>, Rem3-2600, Style-Hamunaptra
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
```
Textual Inversion embeddings trained on version 1.X stable diffusion
models are incompatible with version 2.X models and vice-versa.
Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.
After the embeddings load, InvokeAI will print out a list of all the
recognized trigger terms. To trigger the term, include it in the
prompt exactly as written, including angle brackets if any and
respecting the capitalization.
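For example, using one of the trigger terms from the startup listing above (the rest of the prompt is arbitrary):

```bash
invoke> a quiet mountain village at dusk in the style of <anime-background-style-v2> -s50
```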
There are at least four different embedding file formats, and each uses
a different convention for the trigger terms. In some cases, the
trigger term is specified in the file contents and may or may not be
surrounded by angle brackets. In the example above, `Rem3-2600`,
`Style-Hamunaptra`, and `<midj-strong>` were specified this way and
there is no easy way to change the term.
In other cases the trigger term is not contained within the embedding
file. In this case, InvokeAI constructs a trigger term consisting of
the base name of the file (without the file extension) surrounded by
angle brackets. In the example above `<easynegative>` is such a file
(the filename was `easynegative.safetensors`). In such cases, you can
change the trigger term simply by renaming the file.
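A sketch of such a rename (the new name is arbitrary, and the root directory is assumed to be `~/invokeai`):

```bash
# After a restart, the trigger term becomes <easy-negative-v2>.
mv ~/invokeai/embeddings/easynegative.safetensors \
   ~/invokeai/embeddings/easy-negative-v2.safetensors
```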
## Training your own Textual Inversion models
InvokeAI provides a script that lets you train your own Textual
Inversion embeddings using a small number (about a half-dozen) images
of your desired style or subject. Please see [Textual
Inversion](TEXTUAL_INVERSION.md) for details.
To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
## Further Reading

View File

@@ -4,24 +4,13 @@ title: Image-to-Image
# :material-image-multiple: Image-to-Image
Both the Web and command-line interfaces provide an "img2img" feature
that lets you seed your creations with an initial drawing or
photo. This is a really cool feature that tells stable diffusion to
build the prompt on top of the image you provide, preserving the
original's basic shape and layout.
## `img2img`
See the [WebUI Guide](WEB.md) for a walkthrough of the img2img feature
in the InvokeAI web server. This document describes how to use img2img
in the command-line tool.
## Basic Usage
Launch the command-line client by launching `invoke.sh`/`invoke.bat`
and choosing option (1). Alternatively, activate the InvokeAI
environment and issue the command `invokeai`.
Once the `invoke> ` prompt appears, you can start an img2img render by
pointing to a seed file with the `-I` option as shown here:
This script also provides an `img2img` feature that lets you seed your creations
with an initial drawing or photo. This is a really cool feature that tells
stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:
!!! example ""

View File

@@ -1,100 +0,0 @@
---
title: Low-Rank Adaptation (LoRA) Models
---
# :material-library-shelves: Using Low-Rank Adaptation (LoRA) Models
## Introduction
LoRA is a technique for fine-tuning Stable Diffusion models using much
less time and memory than traditional training techniques. The
resulting model files are much smaller than full model files, and can
be used to generate specialized styles and subjects.
LoRAs are built on top of Stable Diffusion v1.x or 2.x checkpoint or
diffusers models. To load a LoRA, you include its name in the text
prompt using a simple syntax described below. While you will generally
get the best results when you use the same model the LoRA was trained
on, they will work to a greater or lesser extent with other models.
The major caveat is that a LoRA built on top of a SD v1.x model cannot
be used with a v2.x model, and vice-versa. If you try, you will get an
error! You may refer to multiple LoRAs in your prompt.
When you apply a LoRA in a prompt you can specify a weight. The higher
the weight, the more influence it will have on the image. Useful
ranges for weights usually fall between 0.0 and 1.0 (with values
between 0.5 and 1.0 being most typical). However, you can specify a
higher weight if you wish. Like models, each LoRA has a slightly
different useful weight range and will interact with other generation
parameters such as the CFG, step count and sampler. The author of the
LoRA will often provide guidance on the best settings, but feel free
to experiment. Be aware that it often helps to reduce the CFG value
when using LoRAs.
## Installing LoRAs
This is very easy! Download a LoRA model file from your favorite site
(e.g. [CIVITAI](https://civitai.com)) and place it in the `loras`
folder in the InvokeAI root directory (usually `~invokeai/loras` on
Linux/Macintosh machines, and `C:\Users\your-name\invokeai\loras` on
Windows systems). If the `loras` folder does not already exist, just
create it. The vast majority of LoRA models use the Kohya file format,
which is a type of `.safetensors` file.
You may change where InvokeAI looks for the `loras` folder by passing the
`--lora_directory` option to the `invoke.sh`/`invoke.bat` launcher, or
by placing the option in `invokeai.init`. For example:
```
invoke.sh --lora_directory=C:\Users\your-name\SDModels\lora
```
## Using a LoRA in your prompt
To activate a LoRA use the syntax `withLora(my-lora-name,weight)`
somewhere in the text of the prompt. The position doesn't matter; use
whatever is most comfortable for you.
For example, if you have a LoRA named `parchment_people.safetensors`
in your `loras` directory, you can load it with a weight of 0.9 with a
prompt like this one:
```
family sitting at dinner table withLora(parchment_people,0.9)
```
Add additional `withLora()` phrases to load more LoRAs.
You may omit the weight entirely to default to a weight of 1.0:
```
family sitting at dinner table withLora(parchment_people)
```
If you watch the console as your prompt executes, you will see
messages relating to the loading and execution of the LoRA. If things
don't work as expected, note down the console messages and report them
on the InvokeAI Issues pages or Discord channel.
That's pretty much all you need to know!
## Training Kohya Models
InvokeAI cannot currently train LoRA models, but it can load and use
existing LoRA ones to generate images. While there are several LoRA
model file formats, the predominant one is ["Kohya"
format](https://github.com/kohya-ss/sd-scripts), written by [Kohya
S.](https://github.com/kohya-ss). InvokeAI provides support for this
format. For creating your own Kohya models, we recommend the Windows
GUI written by former InvokeAI-team member
[bmaltais](https://github.com/bmaltais), which can be found at
[kohya_ss](https://github.com/bmaltais/kohya_ss).
We can also recommend the [HuggingFace DreamBooth Training
UI](https://huggingface.co/spaces/lora-library/LoRA-DreamBooth-Training-UI),
a paid service that supports both Textual Inversion and LoRA training.
You may also be interested in [Textual
Inversion](TEXTUAL_INVERSION.md) training, which is supported by
InvokeAI as a text console and command-line tool.

View File

@@ -40,7 +40,7 @@ for adj in adjectives:
print(f'a {adj} day -A{samp} -C{cg}')
```
Its output looks like this (abbreviated):
It's output looks like this (abbreviated):
```bash
a sunny day -Aklms -C7.5

View File

@@ -54,7 +54,8 @@ Please enter 1, 2, 3, or 4: [1] 3
```
From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command `invokeai-ti --gui`.
you can launch the front end with the command `textual_inversion
--gui`.
This will launch a text-based front end that will look like this:
@@ -154,11 +155,8 @@ training sets will converge with 2000-3000 steps.
This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
quickly, but use more memory. The default size is selected based on
whether you have the `xformers` memory-efficient attention library
installed. If `xformers` is available, the batch size will be 8,
otherwise 3. These values were chosen to allow training to run with
GPUs with as little as 12 GB VRAM.
quickly, but use more memory. The default size will run with GPUs with
as little as 12 GB.
### Learning rate
@@ -175,10 +173,8 @@ learning rate to improve performance.
### Use xformers acceleration
This will activate XFormers memory-efficient attention, which will
reduce memory requirements by half or more and allow you to select a
higher batch size. You need to have XFormers installed for this to
have an effect.
This will activate XFormers memory-efficient attention. You need to
have XFormers installed for this to have an effect.
### Learning rate scheduler
@@ -231,12 +227,12 @@ It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:
```sh
invokeai-ti --help
textual_inversion --help
```
Typical usage is shown here:
```sh
invokeai-ti \
textual_inversion \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=style \
@@ -255,67 +251,6 @@ invokeai-ti \
--only_save_embeds
```
## Using Distributed Training
If you have multiple GPUs on one machine, or a cluster of GPU-enabled
machines, you can activate distributed training. See the [HuggingFace
Accelerate pages](https://huggingface.co/docs/accelerate/index) for
full information, but the basic recipe is:
1. Enter the InvokeAI developer's console command line by selecting
option [8] from the `invoke.sh`/`invoke.bat` script.
2. Configure Accelerate using `accelerate config`:
```sh
accelerate config
```
This will guide you through the configuration process, including
specifying how many machines you will run training on and the number
of GPUs per machine.
You only need to do this once.
3. Launch training from the command line using `accelerate launch`. Be sure
that your current working directory is the InvokeAI root directory (usually
named `invokeai` in your home directory):
```sh
accelerate launch .venv/bin/invokeai-ti \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=object \
--initializer_token='*' \
--placeholder_token='<shraddha>' \
--train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
--output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
--scale_lr \
--train_batch_size=10 \
--gradient_accumulation_steps=4 \
--max_train_steps=2000 \
--learning_rate=0.0005 \
--lr_scheduler=constant \
--mixed_precision=fp16 \
--only_save_embeds
```
## Using Embeddings
After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
These will be automatically loaded when you start InvokeAI.
Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.
**Note:** `.pt` embeddings do not require the angle brackets.
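Continuing the example, a prompt using the newly trained embedding might look like this (the prompt text and settings are arbitrary):

```bash
invoke> a watercolor portrait of <terence> reading in a garden -s50 -C7.5
```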
## Troubleshooting
### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
Messages like this indicate you trained the embedding on a different base model than the currently selected one.
For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
## Reading
For more information on textual inversion, please see the following
@@ -332,4 +267,4 @@ resources:
---
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team

View File

@@ -5,14 +5,11 @@ title: InvokeAI Web Server
# :material-web: InvokeAI Web Server
As of version 2.0.0, this distribution comes with a full-featured web server
(see screenshot).
To use it, launch the `invoke.sh`/`invoke.bat` script and select
option (2). Alternatively, with the InvokeAI environment active, run
the `invokeai` script by adding the `--web` option:
(see screenshot). To use it, run the `invoke.py` script by adding the `--web`
option:
```bash
invokeai --web
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
```
You can then connect to the server by pointing your web browser at
@@ -22,23 +19,17 @@ address of the host you are running it on, or the wildcard `0.0.0.0`. For
example:
```bash
invoke.sh --host 0.0.0.0
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
```
or
## Quick guided walkthrough of the WebGUI's features
```bash
invokeai --web --host 0.0.0.0
```
## Quick guided walkthrough of the WebUI's features
While most of the WebUI's features are intuitive, here is a guided walkthrough
While most of the WebGUI's features are intuitive, here is a guided walkthrough
through its various components.
![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}
The screenshot above shows the Text to Image tab of the WebUI. There are three
The screenshot above shows the Text to Image tab of the WebGUI. There are three
main sections:
1. A **control panel** on the left, which contains various settings for text to
@@ -72,14 +63,12 @@ From top to bottom, these are:
1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph)
generate a new one, modified by the text prompt
3. Unified Canvas - Interactively combine multiple images, extend them
with outpainting, and modify interior portions of the image with
inpainting, erase portions of a starting image and have the AI fill in
the erased region from a text prompt.
4. Workflow Management (not yet implemented) - this panel will allow you to create
pipelines of common operations and combine them into workflows.
5. Training (not yet implemented) - this panel will provide an interface to [textual
inversion training](TEXTUAL_INVERSION.md) and fine tuning.
3. Inpainting (pending) - Interactively erase portions of a starting image and
have the AI fill in the erased region from a text prompt.
4. Outpainting (pending) - Interactively add blank space to the borders of a
starting image and fill in the background from a text prompt.
5. Postprocessing (pending) - Interactively postprocess generated images using a
variety of filters.
The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already be accessed
@@ -87,18 +76,18 @@ through the Text to Image and Image to Image tabs.
## Walkthrough
The following walkthrough will exercise most (but not all) of the WebUI's
The following walkthrough will exercise most (but not all) of the WebGUI's
feature set.
### Text to Image
1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
1. Launch the WebGUI using `python scripts/invoke.py --web` and connect to it
with your browser by accessing `http://localhost:9090`. If the browser and
server are running on different machines on your LAN, add the option
`--host 0.0.0.0` to the launch command line and connect to the machine
hosting the web server using its IP address or domain name.
2. If all goes well, the WebUI should come up and you'll see a green
2. If all goes well, the WebGUI should come up and you'll see a green
`connected` message on the upper right.
#### Basics
@@ -245,7 +234,7 @@ walkthrough.
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
the blank area to get an upload dialog. The image will load into an area
marked _Initial Image_. (The WebUI will also load the most
marked _Initial Image_. (The WebGUI will also load the most
recently-generated image from the gallery into a section on the left, but
this image will be replaced in the next step.)
@@ -295,17 +284,13 @@ initial image" icons are located.
![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}
### Unified Canvas
See the [Unified Canvas Guide](UNIFIED_CANVAS.md)
## Parting remarks
This concludes the walkthrough, but there are several more features that you can
explore. Please check out the [Command Line Interface](CLI.md) documentation for
further explanation of the advanced features that were not covered here.
The WebUI is under rapid development. Check back regularly for updates!
The WebGUI is under rapid development. Check back regularly for updates!
## Reference

View File

@@ -2,84 +2,4 @@
title: Overview
---
- The Basics
- The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the
[WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
- The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a
stepwise fashion. This feature combines img2img, inpainting and
outpainting in a single convenient digital artist-optimized user
interface.
- The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
- [Visual Manual for InvokeAI](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
- Image Generation
- [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
- [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see
the [Embiggen Upscaling Guide](EMBIGGEN.md).
- The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of
embeddings.
- [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
- [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
- [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the
CLI.
- [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it?
Variations are the ticket.
- Model Management
- [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This guide
also covers optimizing models to load quickly.
- [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a new
model that combines characteristics of the originals.
- [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
- Other Features
- [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
- [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!
Here you can find the documentation for different features.

View File

@@ -1,4 +0,0 @@
# :octicons-file-code-16: IDE-Settings
Here we will share settings for IDEs used by our developers; maybe you can find
something interesting which will help to boost your development efficiency 🔥

View File

@@ -1,250 +0,0 @@
---
title: Visual Studio Code
---
# :material-microsoft-visual-studio-code:Visual Studio Code
The Workspace Settings are stored in the project (repository) root and take
priority over your user settings.
This makes it possible to have different settings for different projects, while
the user settings are used as a default value if no workspace settings are provided.
## tasks.json
First we will create a task configuration which will create a virtual
environment and update the deps (pip, setuptools and wheel).
Into this venv we will then install the pyproject.toml in editable mode with
dev, docs and test dependencies.
```json title=".vscode/tasks.json"
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "Create virtual environment",
"detail": "Create .venv and upgrade pip, setuptools and wheel",
"command": "python3",
"args": [
"-m",
"venv",
".venv",
"--prompt",
"InvokeAI",
"--upgrade-deps"
],
"runOptions": {
"instanceLimit": 1,
"reevaluateOnRerun": true
},
"group": {
"kind": "build"
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
},
{
"label": "build InvokeAI",
"detail": "Build pyproject.toml with extras dev, docs and test",
"command": "${workspaceFolder}/.venv/bin/python3",
"args": [
"-m",
"pip",
"install",
"--use-pep517",
"--editable",
".[dev,docs,test]"
],
"dependsOn": "Create virtual environment",
"dependsOrder": "sequence",
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
}
]
}
```
The fastest way to build InvokeAI now is ++cmd+shift+b++
## launch.json
This file defines debugger configurations, so that you can launch and monitor
the application with one click, set breakpoints to inspect specific states,
and more.
```json title=".vscode/launch.json"
{
"version": "0.2.0",
"configurations": [
{
"name": "invokeai web",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "invokeai cli",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "mkdocs serve",
"type": "python",
"request": "launch",
"program": ".venv/bin/mkdocs",
"args": ["serve"],
"justMyCode": true
}
]
}
```
Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)
## extensions.json
A list of recommended vscode-extensions to make your life easier:
```json title=".vscode/extensions.json"
{
"recommendations": [
"editorconfig.editorconfig",
"github.vscode-pull-request-github",
"ms-python.black-formatter",
"ms-python.flake8",
"ms-python.isort",
"ms-python.python",
"ms-python.vscode-pylance",
"redhat.vscode-yaml",
"tamasfe.even-better-toml",
"eamodio.gitlens",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"esbenp.prettier-vscode",
"davidanson.vscode-markdownlint",
"yzhang.markdown-all-in-one",
"bierner.github-markdown-preview",
"ms-azuretools.vscode-docker",
"mads-hartmann.bash-ide-vscode"
]
}
```
## settings.json
With the settings below, your files will already get formatted when you save them (only
your modifications if available), which will help you to not run into trouble
with the pre-commit hooks. If the hooks fail, they will prevent you from
committing, but most hooks directly add a fixed version, so that you just need to
stage and commit them:
```json title=".vscode/settings.json"
{
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.quickSuggestions": {
"comments": false,
"strings": true,
"other": true
},
"editor.suggest.insertMode": "replace",
"gitlens.codeLens.scopes": ["document"]
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "file"
},
"[toml]": {
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.rulers": [80],
"editor.unicodeHighlight.ambiguousCharacters": false,
"editor.unicodeHighlight.invisibleCharacters": false,
"diffEditor.ignoreTrimWhitespace": false,
"editor.wordWrap": "on",
"editor.quickSuggestions": {
"comments": "off",
"strings": "off",
"other": "off"
},
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[shellscript]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[ignore]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"editor.rulers": [88],
"evenBetterToml.formatter.alignEntries": false,
"evenBetterToml.formatter.allowedBlankLines": 1,
"evenBetterToml.formatter.arrayAutoExpand": true,
"evenBetterToml.formatter.arrayTrailingComma": true,
"evenBetterToml.formatter.arrayAutoCollapse": true,
"evenBetterToml.formatter.columnWidth": 88,
"evenBetterToml.formatter.compactArrays": true,
"evenBetterToml.formatter.compactInlineTables": true,
"evenBetterToml.formatter.indentEntries": false,
"evenBetterToml.formatter.inlineTableExpand": true,
"evenBetterToml.formatter.reorderArrays": true,
"evenBetterToml.formatter.reorderKeys": true,
"evenBetterToml.formatter.compactEntries": false,
"evenBetterToml.schema.enabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"python.languageServer": "Pylance",
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests",
"--cov=ldm",
"--cov-branch",
"--cov-report=term:skip-covered"
],
"yaml.schemas": {
"https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
}
}
```

View File

@@ -1,135 +0,0 @@
---
title: Pull-Request
---
# :octicons-git-pull-request-16: Pull-Request
## pre-requirements
To follow the steps in this tutorial you will need:
- [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- Text / Code Editor (personally I prefer
[Visual Studio Code](https://code.visualstudio.com/Download))
- Terminal:
- If you are on Linux/MacOS you can use bash or zsh
- for Windows Users the commands are written for PowerShell
## Fork Repository
The first step, if you want to contribute to InvokeAI, is to fork the
repository.
Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" Button
in the top right.
## Clone your fork
After you forked the Repository, you should clone it to your dev machine:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git clone https://github.com/<github username>/InvokeAI \
&& cd InvokeAI
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git clone https://github.com/<github username>/InvokeAI `
&& cd InvokeAI
```
## Install in Editable Mode
To install InvokeAI in editable mode, we recommend (as always) creating and
activating a venv first. Afterwards you can install the InvokeAI package,
including the dev and docs extras, in editable mode, followed by the installation
of the pre-commit hook:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
python -m venv .venv \
--prompt InvokeAI \
--upgrade-deps \
&& source .venv/bin/activate \
&& pip install \
--upgrade \
--use-pep517 \
--editable=".[dev,docs]" \
&& pre-commit install
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
python -m venv .venv `
--prompt InvokeAI `
--upgrade-deps `
&& .venv/scripts/activate.ps1 `
&& pip install `
--upgrade `
--use-pep517 `
--editable=".[dev,docs]" `
&& pre-commit install
```
## Create a branch
Make sure you are on main branch, from there create your feature branch:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git checkout main \
&& git pull \
&& git checkout -B <branch name>
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git checkout main `
&& git pull `
&& git checkout -B <branch name>
```
## Commit your changes
When you are done with adding / updating content, you need to commit those
changes to your repository before you can actually open a PR:
```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```
1. Replace this with a space-separated list of the files you changed, like:
`README.md foo.sh bar.json baz`
## Create a Pull Request
After pushing your changes, you are ready to create a Pull Request. Just head
over to your fork on [GitHub](https://github.com), which should already show you
a message that there have been recent changes on your feature branch and a green
button which you could use to create the PR.
The default target for your PRs would be the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI)
Another way would be to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code Terminal Window 🤭):
```sh
gh pr create
```
The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the Title and the Body (Description) if
you did not already pass them as arguments.

View File

@@ -1,26 +0,0 @@
---
title: Issues
---
# :octicons-issue-opened-16: Issues
## :fontawesome-solid-bug: Report a bug
If you stumbled over a bug while using InvokeAI, we would appreciate it a lot if
you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.
If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
find out how to create a Pull Request.
## Request a feature
If you have an idea for a new feature which you would like to see in
InvokeAI, there is a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
available in the issues section of the repository.
If you are just curious which features have already been requested, you can find the
overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
View File
@@ -1,32 +0,0 @@
---
title: docs
---
# :simple-readthedocs: MkDocs-Material
If you want to contribute to the docs, there is an easy way to verify the
results of your changes before committing them.
Just follow the steps in the [Pull-Requests](010_PULL_REQUEST.md) docs, where we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
Once installed, it's as simple as:
```sh
mkdocs serve
```
This will build the docs locally and serve them on your local host. Auto-reload
is included, so you can update a doc, save it, and switch to the browser without
needing to restart `mkdocs serve`.
More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).
## :material-microsoft-visual-studio-code:VS-Code
We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
formatting, since this is automated via prettier, though this is of course not
limited to VS-Code.
View File
@@ -1,76 +0,0 @@
# Transformation to nodes
## Current state
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| generate(generate);
web --> |txt2img| generate(generate);
cli --> |txt2img| generate(generate);
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
## Transitional Architecture
### first step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(Img2img node);
web --> |txt2img| generate(generate);
img2img_node --> model_manager;
img2img_node --> generators;
cli --> |txt2img| generate;
cli --> |img2img| generate;
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
### second step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(img2img node);
img2img_node --> model_manager;
img2img_node --> generators;
web --> |txt2img| txt2img_node(txt2img node);
cli --> |txt2img| txt2img_node;
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
txt2img_node --> model_manager;
txt2img_node --> generators;
txt2img_node --> ti_manager[TI Manager];
```
## Final Architecture
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img|img2img_node(img2img node);
cli --> |img2img|img2img_node;
web --> |txt2img|txt2img_node(txt2img node);
cli --> |txt2img|txt2img_node;
img2img_node --> model_manager;
txt2img_node --> model_manager;
img2img_node --> generators;
txt2img_node --> generators;
img2img_node --> ti_manager[TI Manager];
txt2img_node --> ti_manager[TI Manager];
```
View File
@@ -1,16 +0,0 @@
---
title: Contributing
---
# :fontawesome-solid-code-commit: Contributing
There are different ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), such as translations, or
opening issues for bugs and ideas for improvement.
This section of the docs explains some of the different ways you can contribute,
to make it easier for newcomers as well as advanced users :nerd:
If you want to contribute code, but you do not have an exact idea yet, take a
look at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
View File
@@ -1,12 +0,0 @@
# :material-help:Help
If you are looking for help with the installation of InvokeAI, please take a
look at the [Installation](../installation/index.md) section of the docs.
Here you will find help on topics like
- how to contribute
- configuration recommendations for IDEs
If you have an idea about what's missing and aren't afraid of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.
docs/index.html Normal file
View File
@@ -0,0 +1,19 @@
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Swagger UI</title>
<link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
<link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
<link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
</head>
<body>
<div id="swagger-ui"></div>
<script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
<script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
<script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
</body>
</html>
View File
@@ -2,8 +2,6 @@
title: Home
---
# :octicons-home-16: Home
<!--
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as:
@@ -31,36 +29,36 @@ title: Home
[![github open prs badge]][github open prs link]
[ci checks on dev badge]:
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[ci checks on dev link]:
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[ci checks on main badge]:
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[ci checks on main link]:
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]:
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]:
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]:
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]:
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]:
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]:
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]:
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]:
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]:
https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]:
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
@@ -83,439 +81,182 @@ Q&A</a>]
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help diagnose issues faster.
## :fontawesome-solid-computer: Hardware Requirements
### :octicons-cpu-24: System
You will need one of the following:
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:
- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards
### :fontawesome-solid-memory: Memory and Disk
- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python,
and all its dependencies.
## :octicons-package-dependencies-24: Installation
This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
### [Installation Getting Started Guide](installation)
First time users, please see
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative installation and
upgrade instructions, please see:
[InvokeAI Installation Overview](installation/)
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
Users who wish to make use of the **PyPatchMatch** inpainting functions
will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing
PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
This method is recommended for first-time users
If you have an NVIDIA card, you can benefit from the significant
memory savings and performance benefits provided by Facebook Lab's
**xFormers** module. Instructions for Linux and Windows users can be found
at [Installing xFormers](installation/070_INSTALL_XFORMERS.md).
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
## :fontawesome-solid-computer: Hardware Requirements
This method is recommended for experienced users and developers
### :octicons-cpu-24: System
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
You will need one of the following:
This method is recommended for those familiar with running Docker containers
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
### Other Installation Guides
We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards
### :fontawesome-solid-memory: Memory
- At least 12 GB Main Memory RAM.
### :fontawesome-regular-hard-drive: Disk
- At least 18 GB of free disk space for the machine learning model, Python, and
all its dependencies.
!!! info
Precision is auto-configured based on the device. If, however, you encounter errors like
`expected type Float but found Half` or `not implemented for Half`, you can try starting
`invoke.py` with the `--precision=float32` flag:
```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```
## :octicons-gift-24: InvokeAI Features
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
- [Visual Manual for InvokeAI v2.3.1](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
<!-- separator -->
<!-- separator -->
### The InvokeAI Command Line Interface
- [Command Line Interace Reference Guide](features/CLI.md)
<!-- separator -->
### Image Management
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Using LoRA models](features/LORAS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)
<!-- separator -->
### Model Management
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Adding custom styles and subjects via embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
- [The InvokeAI Web Interface](features/WEB.md) -
[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
### Prompt Engineering
- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
- [The Command Line Interace](features/CLI.md) -
[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
[Outpainting](features/OUTPAINTING.md) -
[Adding custom styles and subjects](features/CONCEPTS.md) -
[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
<!-- separator -->
- [Generating Variations](features/VARIATIONS.md)
<!-- separator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- [Model Merging](features/MODEL_MERGING.md)
<!-- separator -->
- Miscellaneous
- [NSFW Checker](features/NSFW.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other](features/OTHER.md)
## :octicons-log-16: Latest Changes
### v2.3.3 <small>(29 March 2023)</small>
#### Bug Fixes
1. When using legacy checkpoints with an external VAE, the VAE file is now scanned for malware prior to loading. Previously only the main model weights file was scanned.
2. Textual inversion will select an appropriate batch size based on whether `xformers` is active, and will default to `xformers` enabled if the library is detected.
3. The batch script log file names have been fixed to be compatible with Windows.
4. Occasional corruption of the `.next_prefix` file (which stores the next output file name in sequence) on Windows systems is now detected and corrected.
5. An infinite loop when opening the developer's console from within the `invoke.sh` script has been corrected.
#### Enhancements
1. It is now possible to load and run several community-contributed SD-2.0 based models, including the infamous "Illuminati" model.
2. The "NegativePrompts" embedding file, and others like it, can now be loaded by placing it in the InvokeAI `embeddings` directory.
3. If no `--model` is specified at launch time, InvokeAI will remember the last model used and restore it the next time it is launched.
4. On Linux systems, the `invoke.sh` launcher now uses a prettier console-based interface. To take advantage of it, install the `dialog` package using your package manager (e.g. `sudo apt install dialog`).
5. When loading legacy models (safetensors/ckpt) you can specify a custom config file and/or a VAE by placing like-named files in the same directory as the model following this example:
```
my-favorite-model.ckpt
my-favorite-model.yaml
my-favorite-model.vae.pt # or my-favorite-model.vae.safetensors
```
### v2.3.2 <small>(13 March 2023)</small>
#### Bugfixes
Since version 2.3.1 the following bugs have been fixed:
1. Black images appearing for potential NSFW images when generating with legacy checkpoint models and both `--no-nsfw_checker` and `--ckpt_convert` turned on.
2. Black images appearing when generating from models fine-tuned on Stable-Diffusion-2-1-base. When importing V2-derived models, you may be asked to select whether the model was derived from a "base" model (512 pixels) or the 768-pixel SD-2.1 model.
3. The "Use All" button was not restoring the Hi-Res Fix setting on the WebUI
4. When using the model installer console app, models failed to import correctly when importing from directories with spaces in their names. A similar issue with the output directory was also fixed.
5. Crashes that occurred during model merging.
6. Restore previous naming of Stable Diffusion base and 768 models.
7. Upgraded to latest versions of `diffusers`, `transformers`, `safetensors` and `accelerate` libraries upstream. We hope that this will fix the `assertion NDArray > 2**32` issue that MacOS users have had when generating images larger than 768x768 pixels. Please report back.
As part of the upgrade to `diffusers`, the location of the diffusers-based models has changed from `models/diffusers` to `models/hub`. When you launch InvokeAI for the first time, it will prompt you to OK a one-time move. This should be quick and harmless, but if you have modified your `models/diffusers` directory in some way, for example using symlinks, you may wish to cancel the migration and make appropriate adjustments.
#### New "Invokeai-batch" script
2.3.2 introduces a new command-line only script called
`invokeai-batch` that can be used to generate hundreds of images from
prompts and settings that vary systematically. This can be used to try
the same prompt across multiple combinations of models, steps, CFG
settings and so forth. It also allows you to template prompts and
generate a combinatorial list like:

```
a shack in the mountains, photograph
a shack in the mountains, watercolor
a shack in the mountains, oil painting
a chalet in the mountains, photograph
a chalet in the mountains, watercolor
a chalet in the mountains, oil painting
a shack in the desert, photograph
...
```
If you have a system with multiple GPUs, or a single GPU with lots of
VRAM, you can parallelize generation across the combinatorial set,
reducing wait times and using your system's resources efficiently
(make sure you have good GPU cooling).
To try `invokeai-batch` out, launch the "developer's console" using
the `invoke` launcher script, or activate the invokeai virtual
environment manually. From the console, give the command
`invokeai-batch --help` in order to learn how the script works and
create your first template file for dynamic prompt generation.
### v2.3.1 <small>(26 February 2023)</small>
This is primarily a bugfix release, but it does provide several new features that will improve the user experience.
#### Enhanced support for model management
InvokeAI now makes it convenient to add, remove and modify models. You can individually import models that are stored on your local system, scan an entire folder and its subfolders for models and import them automatically, and even directly import models from the internet by providing their download URLs. You also have the option of designating a local folder to scan for new models each time InvokeAI is restarted.
There are three ways of accessing the model management features:
1. ***From the WebUI***, click on the cube to the right of the model selection menu. This will bring up a form that allows you to import models individually from your local disk or scan a directory for models to import.
![image](https://user-images.githubusercontent.com/111189/220638091-918492cc-0719-4194-b033-3741e8289b30.png)
2. **Using the Model Installer App**
Choose option (5) _download and install models_ from the `invoke` launcher script to start a new console-based application for model management. You can use this to select from a curated set of starter models, or import checkpoint, safetensors, and diffusers models from a local disk or the internet. The example below shows importing two checkpoint URLs from popular SD sites and a HuggingFace diffusers model using its Repository ID. It also shows how to designate a folder to be scanned at startup time for new models to import.
Command-line users can start this app using the command `invokeai-model-install`.
![image](https://user-images.githubusercontent.com/111189/220660363-22ff3a2e-8082-410e-a818-d2b3a0529bac.png)
3. **Using the Command Line Client (CLI)**
The `!install_model` and `!convert_model` commands have been enhanced to allow entering of URLs and local directories to scan and import. The first command installs .ckpt and .safetensors files as-is. The second one converts them into the faster diffusers format before installation.
Internally InvokeAI is able to probe the contents of a .ckpt or .safetensors file to distinguish among v1.x, v2.x and inpainting models. This means that you do **not** need to include "inpaint" in your model names to use an inpainting model. Note that Stable Diffusion v2.x models will be autoconverted into a diffusers model the first time you use them.
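For illustration only, a session in the interactive CLI (shown here with its `invoke>` prompt) might look like the sketch below; the URL and the directory path are placeholders rather than real model locations:
```
invoke> !install_model https://example.com/models/my-favorite-checkpoint.safetensors
invoke> !convert_model C:\Users\fred\Downloads\checkpoints
```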
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management.
#### An Improved Installer Experience
The installer now launches a console-based UI for setting and changing commonly-used startup options:
![image](https://user-images.githubusercontent.com/111189/220644777-3d3a90ca-f9e2-4e6d-93da-cbdd66bf12f3.png)
After selecting the desired options, the installer installs several support models needed by InvokeAI's face reconstruction and upscaling features and then launches the interface for selecting and installing models shown earlier. At any time, you can edit the startup options by launching `invoke.sh`/`invoke.bat` and entering option (6) _change InvokeAI startup options_
Command-line users can launch the new configure app using `invokeai-configure`.
This release also comes with a renewed updater. To do an update without going through a whole reinstallation, launch `invoke.sh` or `invoke.bat` and choose option (9) _update InvokeAI_ . This will bring you to a screen that prompts you to update to the latest released version, to the most current development version, or any released or unreleased version you choose by selecting the tag or branch of the desired version.
![image](https://user-images.githubusercontent.com/111189/220650124-30a77137-d9cd-406e-a87d-d8283f99a4b3.png)
Command-line users can run this interface by typing `invokeai-configure`
#### Image Symmetry Options
There are now features to generate horizontal and vertical symmetry during generation. The way these work is to wait until a selected step in the generation process and then to turn on a mirror image effect. In addition to generating some cool images, you can also use this to make side-by-side comparisons of how an image will look with more or fewer steps. Access this option from the WebUI by selecting _Symmetry_ from the image generation settings, or within the CLI by using the options `--h_symmetry_time_pct` and `--v_symmetry_time_pct` (these can be abbreviated to `--h_sym` and `--v_sym` like all other options).
![image](https://user-images.githubusercontent.com/111189/220658687-47fd0f2c-7069-4d95-aec9-7196fceb360d.png)
#### A New Unified Canvas Look
This release introduces a beta version of the WebUI Unified Canvas. To try it out, open up the settings dialogue in the WebUI (gear icon) and select _Use Canvas Beta Layout_:
![image](https://user-images.githubusercontent.com/111189/220646958-b7eca95e-dc39-4cd2-b277-63eac98ed446.png)
Refresh the screen and go to the Unified Canvas (left side of screen, third icon from the top). The new layout is designed to provide more space to work in and to keep the image controls close to the image itself:
![image](https://user-images.githubusercontent.com/111189/220647560-4a9265a1-6926-44f9-9d08-e1ef2ce61ff8.png)
#### Model conversion and merging within the WebUI
The WebUI now has an intuitive interface for model merging, as well as for permanent conversion of models from legacy .ckpt/.safetensors formats into diffusers format. These options are also available directly from the `invoke.sh`/`invoke.bat` scripts.
#### An easier way to contribute translations to the WebUI
We have migrated our translation efforts to [Weblate](https://hosted.weblate.org/engage/invokeai/), a FOSS translation product. Maintaining the growing project's translations is now far simpler for the maintainers and community. Please review our brief [translation guide](https://github.com/invoke-ai/InvokeAI/blob/v2.3.1/docs/other/TRANSLATION.md) for more information on how to contribute.
#### Numerous internal bugfixes and performance issues
This release quashes multiple bugs that were reported in 2.3.0. Major internal changes include upgrading to `diffusers 0.13.0`, and using the `compel` library for prompt parsing. See [Detailed Change Log](#full-change-log) for a detailed list of bugs caught and squished.
#### Summary of InvokeAI command line scripts (all accessible via the launcher menu)
| Command | Description |
|--------------------------|---------------------------------------------------------------------|
| `invokeai` | Command line interface |
| `invokeai --web` | Web interface |
| `invokeai-model-install` | Model installer with console forms-based front end |
| `invokeai-ti --gui` | Textual inversion, with a console forms-based front end |
| `invokeai-merge --gui` | Model merging, with a console forms-based front end |
| `invokeai-configure` | Startup configuration; can also be used to reinstall support models |
| `invokeai-update` | InvokeAI software updater |
### v2.3.0 <small>(9 February 2023)</small>
#### Migration to Stable Diffusion `diffusers` models
Previous versions of InvokeAI supported the original model file format
introduced with Stable Diffusion 1.4. In the original format, known variously as
"checkpoint", or "legacy" format, there is a single large weights file ending
with `.ckpt` or `.safetensors`. Though this format has served the community
well, it has a number of disadvantages, including file size, slow loading times,
and a variety of non-standard variants that require special-case code to handle.
In addition, because checkpoint files are actually a bundle of multiple machine
learning sub-models, it is hard to swap different sub-models in and out, or to
share common sub-models. A new format, introduced by the StabilityAI company in
collaboration with HuggingFace, is called `diffusers` and consists of a
directory of individual models. The most immediate benefit of `diffusers` is
that they load from disk very quickly. A longer term benefit is that in the near
future `diffusers` models will be able to share common sub-models, dramatically
reducing disk space when you have multiple fine-tune models derived from the
same base.
When you perform a new install of version 2.3.0, you will be offered the option
to install the `diffusers` versions of a number of popular SD models, including
Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of
2.1). These will act and work just like the checkpoint versions. Do not be
concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk!
InvokeAI 2.3.0 can still load these and generate images from them without any
extra intervention on your part.
To take advantage of the optimized loading times of `diffusers` models, InvokeAI
offers options to convert legacy checkpoint models into optimized `diffusers`
models. If you use the `invokeai` command line interface, the relevant commands
are:
- `!convert_model` -- Take the path to a local checkpoint file or a URL that
is pointing to one, convert it into a `diffusers` model, and import it into
InvokeAI's models registry file.
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
models file, this command will accept its short name and convert it into a
like-named `diffusers` model, optionally deleting the original checkpoint
file.
- `!import_model` -- Take the local path of either a checkpoint file or a
`diffusers` model directory and import it into InvokeAI's registry file. You
may also provide the ID of any diffusers model that has been published on
the
[HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
and it will be downloaded and installed automatically.
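As a brief illustration (the local path and the short model name are placeholders; the HuggingFace ID is just one example of a published diffusers model), a hypothetical CLI session using these commands could look like this:
```
invoke> !import_model stabilityai/stable-diffusion-2-1
invoke> !optimize_model my-legacy-checkpoint
invoke> !convert_model /path/to/some-other-model.ckpt
```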
The WebGUI offers similar functionality for model management.
For advanced users, new command-line options provide additional functionality.
Launching `invokeai` with the argument `--autoconvert <path to directory>` takes
the path to a directory of checkpoint files, automatically converts them into
`diffusers` models and imports them. Each time the script is launched, the
directory will be scanned for new checkpoint files to be loaded. Alternatively,
the `--ckpt_convert` argument will cause any checkpoint or safetensors model
that is already registered with InvokeAI to be converted into a `diffusers`
model on the fly, allowing you to take advantage of future diffusers-only
features without explicitly converting the model and saving it to disk.
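For example, assuming `/path/to/my/checkpoints` is a folder of your own legacy models, the two launch styles look like this:
```bash
# scan the folder at every launch and auto-convert any new checkpoint files found there
invokeai --autoconvert /path/to/my/checkpoints

# or convert already-registered legacy models on the fly, without saving diffusers copies to disk
invokeai --ckpt_convert
```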
Please see
[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
for more information on model management in both the command-line and Web
interfaces.
#### Support for the `XFormers` Memory-Efficient Crossattention Package
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
installed, the`xformers` package dramatically reduces the memory footprint of
loaded Stable Diffusion models files and modestly increases image generation
speed. `xformers` will be installed and activated automatically if you specify a
CUDA system at install time.
The caveat with using `xformers` is that it introduces slightly
non-deterministic behavior, and images generated using the same seed and other
settings will be subtly different between invocations. Generally the changes are
unnoticeable unless you rapidly shift back and forth between images, but to
disable `xformers` and restore fully deterministic behavior, you may launch
InvokeAI using the `--no-xformers` option. This is most conveniently done by
opening the file `invokeai/invokeai.init` with a text editor, and adding the
line `--no-xformers` at the bottom.
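As a minimal sketch, assuming the default runtime directory in your home folder, you could append the line from a terminal instead of using a text editor:
```bash
# add the option to the bottom of the InvokeAI init file
echo "--no-xformers" >> ~/invokeai/invokeai.init
```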
#### A Negative Prompt Box in the WebUI
There is now a separate text input box for negative prompts in the WebUI. This
is convenient for stashing frequently-used negative prompts ("mangled limbs, bad
anatomy"). The `[negative prompt]` syntax continues to work in the main prompt
box as well.
To see exactly how your prompts are being parsed, launch `invokeai` with the
`--log_tokenization` option. The console window will then display the
tokenization process for both positive and negative prompts.
#### Model Merging
Version 2.3.0 offers an intuitive user interface for merging up to three Stable
Diffusion models. Model merging allows you to
mix the behavior of models to achieve very interesting effects. To use this,
each of the models must already be imported into InvokeAI and saved in
`diffusers` format, then launch the merger using a new menu item in the InvokeAI
launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line
with `invokeai-merge --gui`. You will be prompted to select the models to merge,
the proportions in which to mix them, and the mixing algorithm. The script will
create a new merged `diffusers` model and import it into InvokeAI for your use.
See
[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
for more details.
#### Textual Inversion Training
Textual Inversion (TI) is a technique for training a Stable Diffusion model to
emit a particular subject or style when triggered by a keyword phrase. You can
perform TI training by placing a small number of images of the subject or style
in a directory, and choosing a distinctive trigger phrase, such as
"pointillist-style". After successful training, The subject or style will be
activated by including `<pointillist-style>` in your prompt.
Previous versions of InvokeAI were able to perform TI, but it required using a
command-line script with dozens of obscure command-line arguments. Version 2.3.0
features an intuitive TI frontend that will build a TI model on top of any
`diffusers` model. You can access training from a new item in the
launcher script or from the command line using `invokeai-ti --gui`.
See
[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
for further details.
#### A New Installer Experience
The InvokeAI installer has been upgraded in order to provide a smoother and
hopefully more glitch-free experience. In addition, InvokeAI is now packaged as
a PyPi project, allowing developers and power-users to install InvokeAI with the
command `pip install InvokeAI --use-pep517`. Please see
[Installation](#installation) for details.
Developers should be aware that the `pip` installation procedure has been
simplified and that the `conda` method is no longer supported at all.
Accordingly, the `environments_and_requirements` directory has been deleted from
the repository.
#### Command-line name changes
All of InvokeAI's functionality, including the WebUI, command-line interface,
textual inversion training and model merging, can all be accessed from the
`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
expanded to add the new functionality. For the convenience of developers and
power users, we have normalized the names of the InvokeAI command-line scripts:
- `invokeai` -- Command-line client
- `invokeai --web` -- Web GUI
- `invokeai-merge --gui` -- Model merging script with graphical front end
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
directory and selecting popular starter models.
For backward compatibility, the old command names are also recognized, including
`invoke.py` and `configure-invokeai.py`. However, these are deprecated and will
eventually be removed.
Developers should be aware that the locations of the scripts' source code have
been moved. The new locations are:
- `invokeai` => `ldm/invoke/CLI.py`
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
- `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
- `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI
using `pip install -e . --use-pep517` in the Git repository, and then to call
the scripts using their 2.3.0 names, rather than executing the scripts directly.
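A minimal sketch of that workflow, run from the root of a Git checkout, might look like this:
```bash
# editable install from the repository root
pip install -e . --use-pep517

# then call the scripts by their 2.3.0 names rather than executing the files directly
invokeai-configure
invokeai --web
```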
Developers should also be aware that several important data files have been
relocated into a new directory named `invokeai`. This includes the WebGUI's
`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
by the installer to select starter models. Eventually all InvokeAI modules will
be in subdirectories of `invokeai`.
Please see
[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0)
for further details. For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
### v2.2.4 <small>(11 December 2022)</small>
#### the `invokeai` directory
Previously there were two directories to worry about, the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the models files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
live in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.
After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!
##### Initialization file `invokeai/invokeai.init`
You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.
#### To update from Version 2.2.3
The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.
#### To update to 2.2.5 (and beyond) there's now an update path.
As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can install a specific release by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.
#### Other 2.2.4 Improvements
- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in
#1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
in #1765
- stability and usage improvements to binary & source installers by @lstein in
#1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
#1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in
#1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in
#1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
@SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein
For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting
Please check out our
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
to get solutions for common installation problems and other issues.
**[:material-frequently-asked-questions: Q&A](help/TROUBLESHOOT.md)** to get
solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing
@@ -541,8 +282,8 @@ thank them for their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.
Original portions of the software are Copyright (c) 2022-23 by
[The InvokeAI Team](https://github.com/invoke-ai).
Original portions of the software are Copyright (c) 2020
[Lincoln D. Stein](https://github.com/lstein)
## :octicons-book-24: Further Reading
View File
@@ -6,106 +6,57 @@ title: Installing with the Automated Installer
## Introduction
The automated installer is a Python script that automates the steps
needed to install and run InvokeAI on a stock computer running recent
versions of Linux, MacOS or Windows. It will leave you with a version
that runs a stable version of InvokeAI with the option to upgrade to
experimental versions later.
The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
## Walk through
1. <a name="hardware_requirements">**Hardware Requirements**: </a>Make sure that your system meets the [hardware
requirements](../index.md#hardware-requirements) and has the
appropriate GPU drivers installed. For a system with an NVIDIA
card installed, you will need to install the CUDA driver, while
AMD-based cards require the ROCm driver. In most cases, if you've
already used the system for gaming or other graphics-intensive
tasks, the appropriate drivers will already be installed. If
unsure, check the [GPU Driver Guide](030_INSTALL_CUDA_AND_ROCM.md)
1. Make sure that your system meets the
[hardware requirements](../index.md#hardware-requirements) and has the
appropriate GPU drivers installed. In particular, if you are a Linux user
with an AMD GPU installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
!!! info "Required Space"
Installation requires roughly 18G of free disk space to load
the libraries and recommended model weights files.
Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.
Regardless of your destination disk, your *system drive*
(`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB
of free disk space to download and cache python
dependencies.
Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB of free disk space to download and cache python dependencies. NOTE for Linux users: if your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
NOTE for Linux users: if your temporary directory is mounted
as a `tmpfs`, ensure it has sufficient space.
2. Check that your system has an up-to-date Python installed. To do this, open
up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
"Powershell" on Windows) and type `python --version`. If Python is
installed, it will print out the version number. If it is version `3.9.1` or `3.10.x`, you meet requirements.
2. <a name="software_requirements">**Software Requirements**: </a>Check that your system has an up-to-date Python installed. To do
this, open up a command-line window ("Terminal" on Linux and
Macintosh, "Command" or "Powershell" on Windows) and type `python
--version`. If Python is installed, it will print out the version
number. If it is version `3.9.*` or `3.10.*`, you meet
requirements. We do not recommend using Python 3.11 or higher,
as not all the libraries that InvokeAI depends on work properly
with this version.
!!! warning "At this time we do not recommend Python 3.11"
!!! warning "What to do if you have an unsupported version"
!!! warning "If you see an older version, or get a command not found error"
Go to [Python Downloads](https://www.python.org/downloads/)
and download the appropriate installer package for your
platform. We recommend [Version
3.10.9](https://www.python.org/downloads/release/python-3109/),
Go to [Python Downloads](https://www.python.org/downloads/) and
download the appropriate installer package for your platform. We recommend
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI.
_Please select your platform in the section below for platform-specific
setup requirements._
=== "Windows"
During the Python configuration process, look out for a
checkbox to add Python to your PATH and select it. If the
install script complains that it can't find python, then open
the Python installer again and choose "Modify" existing
installation.
=== "Windows users"
Installation requires an up to date version of the Microsoft
Visual C libraries. Please install the 2015-2022 libraries
available here:
https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
- During the Python configuration process,
look out for a checkbox to add Python to your PATH
and select it. If the install script complains that it can't
find python, then open the Python installer again and choose
"Modify" existing installation.
Please double-click on the file `WinLongPathsEnabled.reg` and
accept the dialog box that asks you if you wish to modify your registry.
This activates long filename support on your system and will prevent
mysterious errors during installation.
- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
=== "Linux"
To install an appropriate version of Python on Ubuntu 22.04
and higher, run the following:
=== "Mac users"
```
sudo apt update
sudo apt install -y python3 python3-pip python3-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
On Ubuntu 20.04, the process is slightly different:
```
sudo apt update
sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt install python3.10 python3-pip python3.10-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
Both `python` and `python3` commands are now pointing at
Python 3.10. You can still access older versions of Python by
calling `python2`, `python3.8`, etc.
Linux systems require a couple of additional graphics
libraries to be installed for proper functioning of
`python3-opencv`. Please run the following:
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
=== "Mac"
After installing Python, you may need to run the
- After installing Python, you may need to run the
following command from the Terminal in order to install the Web
certificates needed to download model data from https sites. If
you see lots of CERTIFICATE ERRORS during the last part of the
@@ -113,81 +64,109 @@ experimental versions later.
`/Applications/Python\ 3.10/Install\ Certificates.command`
You may need to install the Xcode command line tools. These
- You may need to install the Xcode command line tools. These
are a set of tools that are needed to run certain applications in a
Terminal, including InvokeAI. This package is provided
directly by Apple. To install, open a terminal window and run `xcode-select --install`. You will get a macOS system popup guiding you through the
install. If you already have them installed, you will instead see some
output in the Terminal advising you that the tools are already installed. More information can be found at [FreeCode Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
Terminal, including InvokeAI. This package is provided directly by Apple.
3. **Download the Installer**: The InvokeAI installer is distributed as a ZIP file. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
and look for a file named:
- To install, open a terminal window and run `xcode-select
--install`. You will get a macOS system popup guiding you through the
install. If you already have them installed, you will instead see some
output in the Terminal advising you that the tools are already installed.
- InvokeAI-installer-v2.X.X.zip
- More information can be found here:
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
where "2.X.X" is the latest released version. The file is located
at the very bottom of the release page, under **Assets**.
=== "Linux users"
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
directory named "InvokeAI-Installer". When unpacked, the directory
will look like this:
For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other Debian-derived distributions.
<figure markdown>
![zipfile-screenshot](../assets/installer-walkthrough/unpacked-zipfile.png)
</figure>
On Ubuntu 22.04 and higher, run the following:
5. **Launch the installer script from the desktop**: If you are using a desktop GUI, double-click the installer file
appropriate for your platform. It will be named `install.bat` on
Windows systems and `install.sh` on Linux and Macintosh
systems. Be aware that your system's file browser may suppress the
display of the file extension.
```
sudo apt update
sudo apt install -y python3 python3-pip python3-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
On Windows systems, if you get an "Untrusted Publisher" warning,
click on "More Info" and then select "Run Anyway." You trust us, right?
On Ubuntu 20.04, the process is slightly different:
6. **[Alternative] Launch the installer script from the command line**: Alternatively, from the command line, run the shell script or .bat file:
```
sudo apt update
sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt install python3.10 python3-pip python3.10-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
Both `python` and `python3` commands are now pointing at Python 3.10. You can still access older versions of Python by calling `python2`, `python3.8`, etc.
Linux systems require a couple of additional graphics libraries to be installed for proper functioning of `python3-opencv`. Please run the following:
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
3. The source installer is distributed in ZIP files. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
look for a series of files named:
- InvokeAI-installer-2.X.X.zip
(Where 2.X.X is the current release number).
Download the latest release.
4. Unpack the zip file into a convenient directory. This will create a new
directory named "InvokeAI-Installer". This example shows how this would look
using the `unzip` command-line tool, but you may use any graphical or
command-line Zip extractor:
```cmd
C:\Documents\Linco> unzip InvokeAI-installer-2.X.X-windows.zip
Archive: C:\Linco\Downloads\InvokeAI-installer-2.X.X-windows.zip
creating: InvokeAI-Installer\
inflating: InvokeAI-Installer\install.bat
inflating: InvokeAI-Installer\readme.txt
...
```
After successful installation, you can delete the `InvokeAI-Installer`
directory.
5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
accept the dialog box that asks you if you wish to modify your registry.
This activates long filename support on your system and will prevent
mysterious errors during installation.
6. If you are using a desktop GUI, double-click the installer file. It will be
named `install.bat` on Windows systems and `install.sh` on Linux and
Macintosh systems.
On Windows systems you will probably get an "Untrusted Publisher" warning.
Click on "More Info" and select "Run Anyway." You trust us, right?
7. Alternatively, from the command line, run the shell script or .bat file:
```cmd
C:\Documents\Linco> cd InvokeAI-Installer
C:\Documents\Linco\invokeAI> .\install.bat
C:\Documents\Linco\invokeAI> install.bat
```
7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
8. The script will ask you to choose where to install InvokeAI. Select a
directory with at least 18G of free space for a full install. InvokeAI and
all its support files will be installed into a new directory named
`invokeai` located at the location you specify.
<figure markdown>
![confirm-install-directory-screenshot](../assets/installer-walkthrough/confirm-directory.png)
</figure>
- The default is to install the `invokeai` directory in your home directory,
usually `C:\Users\YourName\invokeai` on Windows systems,
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
on Macintoshes, where "YourName" is your login name.
- If you have previously installed InvokeAI, you will be asked to
confirm whether you want to reinstall into this directory. You
may choose to reinstall, in which case your version will be upgraded,
or choose a different directory.
- The script uses tab autocompletion to suggest directory path completions.
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
to suggest completions.
8. **Select your GPU**: The installer will autodetect your platform and will request you to
confirm the type of GPU your graphics card has. On Linux systems,
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
or CPU (no graphics acceleration). On Windows, you'll have the
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
you select CPU on M1 or M2 Macintoshes, you will get MPS-based
graphics acceleration without installing additional drivers. If you
are unsure what GPU you are using, you can ask the installer to
guess.
9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI and the application itself.
9. Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI, then download the current InvokeAI release and
install it.
Be aware that some of the library download and install steps take a long
time. In particular, the `pytorch` package is quite large and often appears
@@ -197,141 +176,25 @@ experimental versions later.
minutes and nothing is happening, you can interrupt the script with ^C. You
may restart it and it will pick up where it left off.
<figure markdown>
![initial-settings-screenshot](../assets/installer-walkthrough/settings-form.png)
</figure>
10. After installation completes, the installer will launch the configuration script, which will guide you through the first-time process
of selecting one or more Stable Diffusion model weights files, downloading
and configuring them. We provide a list of popular models that InvokeAI
performs well with. However, you can add more weight files later on using
the command-line client or the Web UI. See
[Installing Models](050_INSTALLING_MODELS.md) for details.
10. **Post-install Configuration**: After installation completes, the
installer will launch the configuration form, which will guide you
through the first-time process of adjusting some of InvokeAI's
startup settings. To move around this form use ctrl-N for
&lt;N&gt;ext and ctrl-P for &lt;P&gt;revious, or use &lt;tab&gt;
and shift-&lt;tab&gt; to move forward and back. Once you are in a
multi-checkbox field use the up and down cursor keys to select the
item you want, and &lt;space&gt; to toggle it on and off. Within
a directory field, pressing &lt;tab&gt; will provide autocomplete
options.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.
Generally the defaults are fine, and you can come back to this screen at
any time to tweak your system. Here are the options you can adjust:
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
- ***Output directory for images***
This is the path to a directory in which InvokeAI will store all its
generated images.
- ***NSFW checker***
If checked, InvokeAI will test images for potential sexual content
and blur them out if found. Note that the NSFW checker consumes
an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
by most image models. If you have a low VRAM GPU (4-6 GB), you
can reduce out of memory errors by disabling the checker.
- ***HuggingFace Access Token***
InvokeAI has the ability to download embedded styles and subjects
from the HuggingFace Concept Library on-demand. However, some of
the concept library files are password protected. To make downloads
smoother, you can set up an account at huggingface.co, obtain an
access token, and paste it into this field. Note that you paste
into this screen using ctrl-shift-V.
- ***Free GPU memory after each generation***
This is useful for low-memory machines and helps minimize the
amount of GPU VRAM used by InvokeAI.
- ***Enable xformers support if available***
If the xformers library was successfully installed, this will activate
it to reduce memory consumption and increase rendering speed noticeably.
Note that xformers has the side effect of generating slightly different
images even when presented with the same seed and other settings.
- ***Force CPU to be used on GPU systems***
This will use the (slow) CPU rather than the accelerated GPU. This
can be used to generate images on systems that don't have a compatible
GPU.
- ***Precision***
This controls whether to use float32 or float16 arithmetic.
float16 uses less memory but is also slightly less accurate.
Ordinarily the right arithmetic is picked automatically ("auto"),
but you may have to use float32 to get images on certain systems
and graphics cards. The "autocast" option is deprecated and
shouldn't be used unless you are asked to by a member of the team.
- ***Number of models to cache in CPU memory***
This allows you to keep models in memory and switch rapidly among
them rather than having them load from disk each time. This slider
controls how many models to keep loaded at once. Each
model will use 2-4 GB of RAM, so use this cautiously
- ***Directory containing embedding/textual inversion files***
This is the directory in which you can place custom embedding
files (.pt or .bin). During startup, this directory will be
scanned and InvokeAI will print out the text terms that
are available to trigger the embeddings.
At the bottom of the screen you will see a checkbox for accepting
the CreativeML Responsible AI License. You need to accept the license
in order to download Stable Diffusion models from the next screen.
_You can come back to the startup options form_ as many times as you like.
From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
this script. On the command line, it is named `invokeai-configure`.
11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
to another screen that prompts you to download a series of starter models. The ones
we recommend are preselected for you, but you are encouraged to use the checkboxes to
pick and choose.
You will probably wish to download `autoencoder-840000` for use with models that
were trained with an older version of the Stability VAE.
<figure markdown>
![select-models-screenshot](../assets/installer-walkthrough/installing-models.png)
</figure>
Below the preselected list of starter models is a large text field which you can use
to specify a series of models to import. You can specify models in a variety of formats,
each separated by a space or newline. The formats accepted are:
- The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
the file browser to the textfield to automatically paste the path. Be sure to remove
extraneous quotation marks and other things that come along for the ride.
- The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
The directory will be scanned from top to bottom (including subfolders) and any
file that can be imported will be.
- A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
and paste directly from a web page, or simply drag the link from the web page
or navigation bar. (You can also use ctrl-shift-V to paste into this field)
The file will be downloaded and installed.
- The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
the format _author_name/model_name_, as in `andite/anything-v4.0`
- The path to a local directory containing a `diffusers`
model. These directories always have the file `model_index.json`
at their top level.
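For illustration, a batch of entries pasted into the import box could look like the following; all local paths and the URL are placeholders, and only the repo ID is taken from the example above:

```text
C:\Users\YourName\Downloads\my-model.safetensors
C:\Users\YourName\Downloads\more-models
https://example.com/models/another-model.ckpt
andite/anything-v4.0
C:\Users\YourName\diffusers\my-diffusers-model
```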
_Select a directory for models to import_ You may select a local
directory for autoimporting at startup time. If you select this
option, the directory you choose will be scanned for new
.ckpt/.safetensors files each time InvokeAI starts up, and any new
files will be automatically imported and made available for your
use.
_Convert imported models into diffusers_ When legacy checkpoint
files are imported, you may select to use them unmodified (the
default) or to convert them into `diffusers` models. The latter
load much faster and have slightly better rendering performance,
but not all checkpoint files can be converted. Note that Stable Diffusion
Version 2.X files are **only** supported in `diffusers` format and will
be converted regardless.
_You can come back to the model install form_ as many times as you like.
From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
this script. On the command line, it is named `invokeai-model-install`.
12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
11. The script will now exit and you'll be ready to generate some images. Look
for the directory `invokeai` installed in the location you chose at the
beginning of the install session. Look for a shell script named `invoke.sh`
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
@@ -342,83 +205,49 @@ experimental versions later.
C:\Documents\Linco\invokeAI> invoke.bat
```
- The `invoke.bat` (`invoke.sh`) script will give you the choice
of starting (1) the command-line interface, (2) the web GUI, (3)
textual inversion training, and (4) model merging.
- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
(1) the command-line interface, or (2) the web GUI. If you start the
latter, you can load the user interface by pointing your browser at
http://localhost:9090.
- By default, the script will launch the web interface. When you
do this, you'll see a series of startup messages ending with
instructions to point your browser at
http://localhost:9090. Click on this link to open up a browser
and start exploring InvokeAI's features.
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
customize its behavior. For example, you can change the location of the
image output directory, or select your favorite sampler. See the
[Command-Line Interface](../features/CLI.md) for a full list of the options.
- To set defaults that will take effect every time you launch InvokeAI,
use a text editor (e.g. Notepad) to edit the file
`invokeai\invokeai.init`. It contains a variety of examples that you can
follow to add and modify launch options. A sketch of the kinds of options
it can contain is shown at the end of this section.
- The launcher script also offers you an option labeled "open the developer
- The script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a
command-line interface in which you can run python commands directly,
access developer tools, and launch InvokeAI with customized options.
12. You can launch InvokeAI with several different command-line arguments that
customize its behavior. For example, you can change the location of the
image output directory, or select your favorite sampler. See the
[Command-Line Interface](../features/CLI.md) for a full list of the options.
!!! warning "Do not move or remove the `invokeai` directory"
The `invokeai` directory contains the `invokeai` application, its
configuration files, the model weight files, and outputs of image generation.
Once InvokeAI is installed, do not move or remove this directory.
- To set defaults that will take effect every time you launch InvokeAI,
use a text editor (e.g. Notepad) to edit the file
`invokeai\invokeai.init`. It contains a variety of examples that you can
follow to add and modify launch options.
!!! warning "The `invokeai` directory contains the `invokeai` application, its
configuration files, the model weight files, and outputs of image generation.
Once InvokeAI is installed, do not move or remove this directory."
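As a rough sketch of what `invokeai.init` can hold, the snippet below lists a few of the launch options discussed in this guide. The exact option names are reported by `invokeai --help`, and the values shown here are placeholders to adjust for your own system:

```bash
# Sample launch options, one option per line; values are placeholders.
--outdir=D:\invokeai\outputs
--no-nsfw_checker
--precision=float32
--web
--host=0.0.0.0
```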
## Troubleshooting
### _Package dependency conflicts_
If you have previously installed InvokeAI or another Stable Diffusion
package, the installer may occasionally pick up outdated libraries and
either the installer or `invoke` will fail with complaints about
library conflicts. In this case, run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
If you have previously installed InvokeAI or another Stable Diffusion package,
the installer may occasionally pick up outdated libraries and either the
installer or `invoke` will fail with complaints about library conflicts. You can
address this by entering the `invokeai` directory and running `update.sh`, which
will bring InvokeAI up to date with the latest libraries.
Then give this command:
### ldm from pypi
`pip install InvokeAI --force-reinstall`
!!! warning
This should fix the issues.
### InvokeAI runs extremely slowly on Linux or Windows systems
The most frequent cause of this problem is when the installation
process installed the CPU-only version of the torch machine-learning
library, rather than a version that takes advantage of GPU
acceleration. To confirm this issue, look at the InvokeAI startup
messages. If you see a message saying ">> Using device CPU", then
this is what happened.
To fix this problem, first determine whether you have an NVidia or an
AMD GPU. The former uses the CUDA driver, and the latter uses ROCm
(only available on Linux). Then run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then type the following commands:
=== "NVIDIA System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
pip install xformers
```
=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```
Some users have tried to correct dependency problems by installing
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
ldm will make matters worse. If you've installed ldm, uninstall it with
`pip uninstall ldm`.
### Corrupted configuration file
@@ -443,53 +272,7 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.
### Out of Memory Issues
The models are large, VRAM is expensive, and you may find yourself
faced with Out of Memory errors when generating images. Here are some
tips to reduce the problem:
* **4 GB of VRAM**
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
and derived models, provided that you **disable** the NSFW checker. To
disable the filter, do one of the following:
* Select option (6) "_change InvokeAI startup options_" from the
launcher. This will bring up the console-based startup settings
dialogue and allow you to unselect the "NSFW Checker" option.
* Start the startup settings dialogue directly by running
`invokeai-configure --skip-sd-weights --skip-support-models`
from the command line.
* Find the `invokeai.init` initialization file in the InvokeAI root
directory, open it in a text editor, and change `--nsfw_checker`
to `--no-nsfw_checker`
If you are on a CUDA system, you can realize significant memory
savings by activating the `xformers` library as described above. The
downside is `xformers` introduces non-deterministic behavior, such
that images generated with exactly the same prompt and settings will
be slightly different from each other. See above for more information.
* **6 GB of VRAM**
This is a border case. Using the SD 1.5 series you should be able to
generate images up to 640x640 with the NSFW checker enabled, and up to
1024x1024 with it disabled and `xformers` activated.
If you run into persistent memory issues, there is a series of
environment variables that you can set before launching InvokeAI to
alter how the PyTorch machine learning library manages memory. See
https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
a list of these tweaks; a minimal example is given after this list.
* **12 GB of VRAM**
This should be sufficient to generate larger images up to about
1280x1280. If you wish to push further, consider activating
`xformers`.
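As a minimal sketch of the PyTorch memory-management tweak mentioned in the 6 GB case above (the value below is an assumption and will need tuning for your own GPU and image sizes):

```bash
# Reduce CUDA memory fragmentation before launching InvokeAI
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128
./invoke.sh
```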
### Other Problems
### other problems
If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
@@ -501,20 +284,36 @@ hours, and often much sooner.
## Updating to newer versions
This distribution is changing rapidly, and we add new features
regularly. Releases are announced at
http://github.com/invoke-ai/InvokeAI/releases, and at
https://pypi.org/project/InvokeAI/ To update to the latest released
version (recommended), follow these steps:
This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `invokeai-configure` script to download any updated
models files that may be needed. You can also use this to add additional models
that you did not select at installation time.
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
`invokeai` root directory.
You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `invokeai-configure`. This happens relatively infrequently. To do
this, simply open up the developer's console again and type
`invokeai-configure`.
2. Choose menu item (10) "Update InvokeAI".
You may also use the `update` script to install any selected version of
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
link of the version you wish to install. You can find the zip links by going to
one of the release pages and looking for the **Assets** section at the
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
big code directory on the InvokeAI welcome page. When you find the version you
want to install, go to the green "&lt;&gt; Code" button at the top, and copy the
"Download ZIP" link.
3. This will launch a menu that gives you the option of:
Now run `update.sh` (or `update.bat`) with the version number of the desired InvokeAI
version as its argument. For example, this will install the old 2.2.0 release.
1. Updating to the latest official release;
2. Updating to the bleeding-edge development version; or
3. Manually entering the tag or branch name of a version of
InvokeAI you wish to try out.
```cmd
update.sh v2.2.0
```
You can get the list of version numbers by going to the [releases
page](https://github.com/invoke-ai/InvokeAI/releases) or by browsing
the [Tags](https://github.com/invoke-ai/InvokeAI/tags) list from the
Code section of the main github page.
View File
@@ -14,56 +14,17 @@ title: Installing Manually
## Introduction
!!! tip "Conda"
As of InvokeAI v2.3.0 installation using the `conda` package manager is no longer being supported. It will likely still work, but we are not testing this installation method.
!!! tip As of InvokeAI v2.3.0 installation using the `conda` package manager
is no longer being supported. It will likely still work, but we are not testing
this installation method.
On Windows systems, you are encouraged to install and use the
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.
### Prerequisites
Before you start, make sure you have the following prerequisites
installed. These are described in more detail in [Automated
Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming):
* **Python**
version 3.9 or 3.10 (3.11 is not recommended).
* **CUDA Tools**
For those with _NVidia GPUs_, you will need to
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
* **ROCm Tools**
For _Linux users with AMD GPUs_, you will need
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
InvokeAI does not support AMD GPUs on Windows systems due to
lack of a Windows ROCm library.
* **Visual C++ Libraries**
_Windows users_ must install the free
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
* **The Xcode command line tools**
for _Macintosh users_. Instructions are available at
[Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
* _Macintosh users_ may also need to run the `Install Certificates` command
if model downloads give lots of certificate errors. Run:
`/Applications/Python\ 3.10/Install\ Certificates.command`
### Installation Walkthrough
To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:
To install InvokeAI with virtual environments and the PIP package manager,
please follow these steps:
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
procedure depends on this and will not work with other versions:
@@ -72,127 +33,74 @@ manager, please follow these steps:
python -V
```
2. Create a directory to contain your InvokeAI library, configuration
files, and models. This is known as the "runtime" or "root"
directory, and often lives in your home directory under the name `invokeai`.
2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
GitHub:
Please keep in mind the disk space requirements - you will need at
least 20GB for the models and the virtual environment. From now
on we will refer to this directory as `INVOKEAI_ROOT`. For convenience,
the steps below create a shell variable of that name which contains the
path to `HOME/invokeai`.
=== "Linux/Mac"
```bash
export INVOKEAI_ROOT=~/invokeai
mkdir $INVOKEAI_ROOT
```
=== "Windows (Powershell)"
```bash
Set-Variable -Name INVOKEAI_ROOT -Value $Home/invokeai
mkdir $INVOKEAI_ROOT
```
3. Enter the root (invokeai) directory and create a virtual Python
environment within it named `.venv`. If the command `python`
doesn't work, try `python3`. Note that while you may create the
virtual environment anywhere in the file system, we recommend that
you create it within the root directory as shown here. This makes
it possible for the InvokeAI applications to find the model data
and configuration. If you do not choose to install the virtual
environment inside the root directory, then you **must** set the
`INVOKEAI_ROOT` environment variable in your shell environment, for
example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.
```terminal
cd $INVOKEAI_ROOT
python -m venv .venv --prompt InvokeAI
```

```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
4. Activate the new environment:
This will create an InvokeAI folder where you will follow the rest of the
steps.
=== "Linux/Mac"
3. Create a directory to contain your InvokeAI installation (known as the "runtime"
or "root" directory). This is where your models, configs, and outputs will live
by default. Please keep in mind the disk space requirements - you will need at
least 18GB (as of this writing) for the models and the virtual environment.
From now on we will refer to this directory as `INVOKEAI_ROOT`. This keeps the
runtime directory separate from the source code and aids in updating.
```bash
source .venv/bin/activate
```
```bash
export INVOKEAI_ROOT="~/invokeai"
mkdir ${INVOKEAI_ROOT}
```
=== "Windows"
4. From within the InvokeAI top-level directory, create and activate a virtual
environment named `.venv` and prompt displaying `InvokeAI`:
```ps
.venv\Scripts\activate
```
```bash
python -m venv ${INVOKEAI_ROOT}/.venv \
--prompt invokeai \
--upgrade-deps \
--copies
source ${INVOKEAI_ROOT}/.venv/bin/activate
```
If you get a permissions error at this point, run this command and try again
!!! warning
`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
You **may** create your virtual environment anywhere on the filesystem.
But IF you choose a location that is *not* inside the `$INVOKEAI_ROOT` directory,
then you must set the `INVOKEAI_ROOT` environment variable in your shell environment,
for example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the Windows environment
variable. Refer to your operating system / shell documentation for the correct way of doing so.
The command-line prompt should change to show `(InvokeAI)` at the
beginning of the prompt. Note that all the following steps should be
run while inside the INVOKEAI_ROOT directory.
5. Make sure that pip is installed in your virtual environment and up to date:
5. Make sure that pip is installed in your virtual environment an up to date:
```bash
python -m pip install --upgrade pip
```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
CUDA, ROCm and CPU/MPS drivers as shown below:
6. Install Package
=== "CUDA (NVidia)"
```bash
pip install --use-pep517 .
```
```bash
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install InvokeAI --use-pep517
```
7. Deactivate and reactivate your virtual environment so that the invokeai-specific commands
Deactivate and reactivate your virtual environment so that the invokeai-specific commands
become available in the environment:
=== "Linux/Macintosh"
```
deactivate && source ${INVOKEAI_ROOT}/.venv/bin/activate
```
```bash
deactivate && source .venv/bin/activate
```
=== "Windows"
```ps
deactivate
.venv\Scripts\activate
```
8. Set up the runtime directory
7. Set up the runtime directory
In this step you will initialize your runtime directory with the downloaded
models, model config files, directory for textual inversion embeddings, and
your outputs.
```terminal
invokeai-configure
```

```bash
invokeai-configure --root ${INVOKEAI_ROOT}
```
The script `invokeai-configure` will interactively guide you through the
@@ -211,36 +119,35 @@ manager, please follow these steps:
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
process for this is described [here](050_INSTALLING_MODELS.md).
9. Run the command-line- or the web- interface:
7. Run the command-line- or the web- interface:
From within INVOKEAI_ROOT, activate the environment
(with `source .venv/bin/activate` or `.venv\scripts\activate`), and then run
the script `invokeai`. If the virtual environment you selected is NOT inside
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
`--root_dir \path\to\invokeai` to the commands below:
Activate the environment (with `source .venv/bin/activate`), and then run
the script `invokeai`. If you selected a non-default location for the
runtime directory, please specify the path with the `--root_dir` option
(abbreviated below as `--root`):
!!! example ""
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
=== "CLI"
```bash
invokeai
invokeai --root ~/invokeai
```
=== "local Webserver"
```bash
invokeai --web
invokeai --web --root ~/invokeai
```
=== "Public Webserver"
```bash
invokeai --web --host 0.0.0.0
invokeai --web --host 0.0.0.0 --root ~/invokeai
```
If you choose to run the web interface, point your browser at
@@ -248,122 +155,23 @@ manager, please follow these steps:
!!! tip
You can permanently set the location of the runtime directory
by setting the environment variable `INVOKEAI_ROOT` to the
path of the directory. As mentioned previously, this is
**highly recommended** if your virtual environment is located outside of
your runtime directory.
You can permanently set the location of the runtime directory by setting the environment variable `INVOKEAI_ROOT` to the path of the directory. As mentioned previously, this is
**required** if your virtual environment is located outside of your runtime directory.
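For example, a sketch of one way to make this setting permanent (the paths are placeholders for your own runtime directory):

```bash
# Linux/Mac: add this line to ~/.bashrc or ~/.zshrc
export INVOKEAI_ROOT="$HOME/invokeai"

# Windows: set a persistent user environment variable from a command prompt
# setx INVOKEAI_ROOT "%USERPROFILE%\invokeai"
```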
10. Render away!
8. Render away!
Browse the [features](../features/CLI.md) section to learn about all the
things you can do with InvokeAI.
Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm-up period
rendering will be fast.
11. Subsequently, to relaunch the script, activate the virtual environment, and
9. Subsequently, to relaunch the script, activate the virtual environment, and
then launch the `invokeai` command. If you forget to activate the virtual
environment you will most likely receive a `command not found` error.
!!! warning
Do not move the runtime directory after installation. The virtual environment will get confused if the directory is moved.
12. Other scripts
The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be launched with the command:
```bash
invokeai-ti --gui
```
Similarly, the [Model Merging](../features/MODEL_MERGING.md) script can be launched with the command:
```bash
invokeai-merge --gui
```
Leave off the `--gui` option to run the script using command-line arguments. Pass the `--help` argument
to get usage instructions.
### Developer Install
If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
code for InvokeAI. For this to work, you will need to install the
`git` source code management program. If it is not already installed
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)
1. From the command line, run this command:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.
2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
3. Enter the InvokeAI repository directory and run one of these
commands, based on your GPU:
=== "CUDA (NVidia)"
```bash
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install -e . --use-pep517
```
Be sure to pass `-e` (for an editable install) and don't forget the
dot ("."). It is part of the command.
You can now run `invokeai` and its related commands. The code will be
read from the repository, so that you can edit the .py source files
and watch the code's behavior change.
4. If you wish to contribute to the InvokeAI project, you are
encouraged to establish a GitHub account and "fork"
https://github.com/invoke-ai/InvokeAI into your own copy of the
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.
Please see [Contributing](../index.md#contributing) for hints
on getting started.
### Unsupported Conda Install
Congratulations, you found the "secret" Conda installation
instructions. If you really **really** want to use Conda with InvokeAI
you can do so using this unsupported recipe:
```
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```
The `pip install` command shown in this recipe is for Linux/Windows
systems with an NVIDIA GPU. See step (6) above for the command to use
with other platforms/GPU combinations. If you don't wish to pass the
`--root` argument to `invokeai` with each launch, you may set the
environment variable INVOKEAI_ROOT to point to the installation directory.
Note that if you run into problems with the Conda installation, the InvokeAI
staff will **not** be able to help you out. Caveat Emptor!
Do not move the runtime directory after installation. The virtual environment has absolute paths in it that get confused if the directory is moved.
View File
@@ -1,133 +0,0 @@
---
title: NVIDIA Cuda / AMD ROCm
---
<figure markdown>
# :simple-nvidia: CUDA | :simple-amd: ROCm
</figure>
In order for InvokeAI to run at full speed, you will need a graphics
card with a supported GPU. InvokeAI supports NVidia cards via the CUDA
driver on Windows and Linux, and AMD cards via the ROCm driver on Linux.
## :simple-nvidia: CUDA
### Linux and Windows Install
If you have used your system for other graphics-intensive tasks, such
as gaming, you may very well already have the CUDA drivers
installed. To confirm, open up a command-line window and type:
```
nvidia-smi
```
If this command produces a status report on the GPU(s) installed on
your system, CUDA is installed and you have no more work to do. If
instead you get "command not found", or similar, then the driver will
need to be installed.
We strongly recommend that you install the CUDA Toolkit package
directly from NVIDIA. **Do not try to install Ubuntu's
nvidia-cuda-toolkit package. It is out of date and will cause
conflicts among the NVIDIA driver and binaries.**
Go to [CUDA Toolkit 11.7
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
and use the target selection wizard to choose your operating system,
hardware platform, and preferred installation method (e.g. "local"
versus "network").
This will provide you with a downloadable install file or, depending
on your choices, a recipe for downloading and running an install shell
script. Be sure to read and follow the full installation instructions.
After an install that seems successful, you can confirm by again
running `nvidia-smi` from the command line.
### Linux Install with a Runtime Container
On Linux systems, an alternative to installing CUDA Toolkit directly on
your system is to run an NVIDIA software container that has the CUDA
libraries already in place. This is recommended if you are already
familiar with containerization technologies such as Docker.
For downloads and instructions, visit the [NVIDIA CUDA Container
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/cu117` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
## :simple-amd: ROCm
### Linux Install
AMD GPUs are only supported on Linux platforms due to the lack of a
Windows ROCm driver at the current time. Also be aware that support
for newer AMD GPUs is spotty. Your mileage may vary.
It is possible that the ROCm driver is already installed on your
machine. To test, open up a terminal window and issue the following
command:
```
rocminfo
```
If you get a table labeled "ROCm System Management Interface" the
driver is installed and you are done. If you get "command not found,"
then the driver needs to be installed.
Go to AMD's [ROCm Downloads
Guide](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation_new.html#installation-methods)
and scroll to the _Installation Methods_ section. Find the subsection
for the install method for your preferred Linux distribution, and
issue the commands given in the recipe.
Annoyingly, the official AMD site does not have a recipe for the most
recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.
After installation, please run `rocminfo` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
do a reboot in order to load the driver. In addition, if you see
errors relating to your username not being a member of the `render`
group, you may fix this by adding yourself to this group with the command:
```
sudo usermod -a -G render myUserName
```
(Thanks to @EgoringKosmos for the usermod recipe.)
### Linux Install with a ROCm-docker Container
If you are comfortable with the Docker containerization system, then
you can build a ROCm docker file. The source code and installation
recipes are available
[Here](https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
This will be done automatically for you if you use the installer
script.
Be aware that the torch machine learning library does not seamlessly
interoperate with all AMD GPUs and you may experience garbled images,
black images, or long startup delays before rendering commences. Most
of these issues can be solved by Googling for workarounds. If you have
a problem and find a solution, please post an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) so that other
users benefit and we can update this document.
View File
@@ -16,6 +16,10 @@ title: Installing with Docker
For general use, install locally to leverage your machine's GPU.
!!! tip "For running on a cloud instance/service"
Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
## Why containers?
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
@@ -74,40 +78,38 @@ Some Suggestions of variables you may want to change besides the Token:
<figure markdown>
| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
| ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag |
| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository |
| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to built, available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. |
| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used |
| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development |
| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url |
| Environment-Variable | Default value | Description |
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
| `CONTAINER_FLAVOR` | cuda | the flavor of the image; can be changed, e.g., if you build with the AMD requirements file. |
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
</figure>
#### Build the Image
I provided a build script, which is located next to the Dockerfile in
`docker/build.sh`. It can be executed from the repository root like this:
I provided a build script, which is located in `docker-build/build.sh` but still
needs to be executed from the Repository root.
```bash
./docker/build.sh
./docker-build/build.sh
```
The build script not only builds the container, but also creates the docker
volume if it does not yet exist.
volume if it does not yet exist; if the volume is empty, it will also download the models.
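As a sketch (not a definitive recipe), any of the variables in the table above can be overridden in the shell environment when you invoke the build script, for example:

```bash
# Build the ROCm flavor of the image instead of the default CUDA flavor
CONTAINER_FLAVOR=rocm ./docker/build.sh
```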
#### Run the Container
After the build process is done, you can run the container via the provided
`docker/run.sh` script
`docker-build/run.sh` script
```bash
./docker/run.sh
./docker-build/run.sh
```
When used without arguments, the container will start the webserver and provide
@@ -117,7 +119,7 @@ also do so.
!!! example "run script example"
```bash
./docker/run.sh "banana sushi" -Ak_lms -S42 -s10
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
@@ -128,18 +130,16 @@ also do so.
## Running the container on your GPU
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running
the container with an extra environment variable to enable GPU usage and have
the process run much faster:
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
environment variable to enable GPU usage and have the process run much faster:
```bash
GPU_FLAGS=all ./docker/run.sh
GPU_FLAGS=all ./docker-build/run.sh
```
This passes the `--gpus all` to docker and uses the GPU.
If you don't have a GPU (or your host is not yet setup to use it) you will see a
message like this:
If you don't have a GPU (or your host is not yet setup to use it) you will see a message like this:
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
@@ -147,8 +147,84 @@ You can use the full set of GPU combinations documented here:
https://docs.docker.com/config/containers/resource_constraints/#gpu
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to
choose a specific device identified by a UUID.
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.
## Running InvokeAI in the cloud with Docker
We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
An advantage of this method is that it does not need any local setup or additional dependencies.
See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
### Prerequisites
- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
### Building and running the image locally
1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
- `make configure` (This does *not* require a GPU-capable system)
- this will create a local cache of models and configs (a.k.a the _runtime dir_)
- enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
To use InvokeAI on the CLI, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, run `make shell`.
#### Building and running without `make`
(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
!!! example "Build the image and configure the runtime directory"
```Shell
cd docker-build
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
```
!!! example "Run the web server"
```Shell
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
```
Access the Web UI at http://localhost:9090
!!! example "Run the InvokeAI interactive CLI"
```
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
```
### Running the image in the cloud
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (e.g. Dockerhub), as sketched after this list
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per the example above, using the `docker run ... configure_invokeai.py` command
1. use either one of the `docker run` commands above, substituting the image name for your own image.
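A minimal sketch of steps 2 to 4 above; the registry name is a placeholder for your own registry:

```bash
# Tag the locally built image and push it to your registry
docker tag local/invokeai:latest your-registry/invokeai:latest
docker push your-registry/invokeai:latest

# On the cloud instance: pull the image and configure the runtime directory
docker pull your-registry/invokeai:latest
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai your-registry/invokeai:latest \
  -c "python scripts/configure_invokeai.py"
```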
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
The template's `README` provides ample detail, but at a high level, the process is as follows:
1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
---
@@ -164,12 +240,13 @@ choose a specific device identified by a UUID.
If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.
If you're **directly on macOS follow these startup instructions**. With the
Conda environment activated (`conda activate ldm`), run the interactive
If you're **directly on macOS follow these startup instructions**.
With the Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`: Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work. By default the images are saved
in `outputs/img-samples/`.
`img2img`:
Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.
```Shell
python3 scripts/invoke.py --full_precision
@@ -185,9 +262,9 @@ invoke> q
### Text to Image
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly. Then increase
steps to 100 or more for good (but slower) results. The prompt can be in quotes
or not.
image. This will let you know that everything is set up correctly.
Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.
```Shell
invoke> The hulk fighting with sheldon cooper -s5 -n1
@@ -200,9 +277,10 @@ You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.
If you're on a container the output is set to the Docker volume. You can copy it
wherever you want. You can download it from the Docker Desktop app, Volumes,
my-vol, data. Or you can copy it from your Mac terminal. Keep in mind
`docker cp` can't expand `*.png` so you'll need to specify the image file name.
wherever you want.
You can download it from the Docker Desktop app, Volumes, my-vol, data.
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
`*.png` so you'll need to specify the image file name.
On your host Mac (you can use the name of any container that mounted the
volume):
View File
@@ -4,456 +4,249 @@ title: Installing Models
# :octicons-paintbrush-16: Installing Models
## Checkpoint and Diffusers Models
## Model Weight Files
The model checkpoint files ('\*.ckpt') are the Stable Diffusion
"secret sauce". They are the product of training the AI on millions of
captioned images gathered from multiple sources.
The model weight files ('\*.ckpt') are the Stable Diffusion "secret sauce". They
are the product of training the AI on millions of captioned images gathered from
multiple sources.
Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are hundreds
that have been fine tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format
which prevents malware from masquerading as a model, and `diffusers`
models, the most recent innovation.
Originally there was only a single Stable Diffusion weights file, which many
people named `model.ckpt`. Now there are dozens or more that have been "fine
tuned" to provide particulary styles, genres, or other features. InvokeAI allows
you to install and run multiple model weight files and switch between them
quickly in the command-line and web interfaces.
InvokeAI supports all three formats but strongly prefers the
`diffusers` format. These are distributed as directories containing
multiple subfolders, each of which contains a different aspect of the
model. The advantage of this is that the models load from disk really
fast. Another advantage is that `diffusers` models are supported by a
large and active set of open source developers working at and with
HuggingFace organization, and improvements in both rendering quality
and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.
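For reference, a `diffusers` model is a directory laid out roughly like this on disk (subfolder contents vary from model to model):

```text
my-diffusers-model/
├── model_index.json
├── scheduler/
├── text_encoder/
├── tokenizer/
├── unet/
└── vae/
```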
While InvokeAI will continue to support legacy `.ckpt` and `.safetensors`
models for the near future, these are deprecated and support will
be withdrawn in version 3.0, after which all legacy models will be
converted into diffusers at the time they are loaded.
This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
into performant `diffusers` models.
This manual will guide you through installing and configuring model weight
files.
## Base Models
InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that are currently installed are found in
`configs/models.yaml`. As of v2.3.1, the list of starter models is:
InvokeAI comes with support for a good initial set of models listed in the model
configuration file `configs/models.yaml`. They are:
|Model Name | HuggingFace Repo ID | Description | URL |
|---------- | ---------- | ----------- | --- |
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
| Model | Weight File | Description | DOWNLOAD FROM |
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | sd-v1-4.ckpt | Previous version of base Stable Diffusion model | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original |
| inpainting-1.5 | sd-v1-5-inpainting.ckpt | Stable Diffusion 1.5 model specialized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| waifu-diffusion-1.3 | model-epoch09-float32.ckpt | Stable Diffusion 1.4 trained to produce anime images | https://huggingface.co/hakurei/waifu-diffusion-v1-3 |
| `<all models>` | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tuned add-on file that improves face generation | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/ |
Note that these files are covered by an "Ethical AI" license which
forbids certain uses. When you initially download them, you are asked
to accept the license terms. In addition, some of these models carry
additional license terms that limit their use in commercial
applications or on public servers. Be sure to familiarize yourself
with the model terms by visiting the URLs in the table above.
Note that these files are covered by an "Ethical AI" license which forbids
certain uses. You will need to create an account on the Hugging Face website and
accept the license terms before you can access the files.
The predefined configuration file for InvokeAI (located at
`configs/models.yaml`) provides entries for each of these weights files.
`stable-diffusion-1.5` is the default model used, and we strongly recommend that
you install this weights file if nothing else.
## Community-Contributed Models
There are too many to list here and more are being contributed every
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
is a great resource for diffusers models, and is also the home of a
[fast-growing repository](https://huggingface.co/sd-concepts-library)
of embedding (".bin") models that add subjects and/or styles to your
images. The latter are automatically installed on the fly when you
include the text `<concept-name>` in your prompt. See [Concepts
Library](../features/CONCEPTS.md) for more information.
There are too many to list here and more are being contributed every day.
Hugging Face maintains a
[fast-growing repository](https://huggingface.co/sd-concepts-library) of
fine-tune (".bin") models that can be imported into InvokeAI by passing the
`--embedding_path` option to the `invoke.py` command.
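For example, a concept trigger can be used directly in a prompt; the concept name below is a hypothetical placeholder, and the actual trigger term is shown on each concept's page in the library:

```Shell
invoke> a watercolor landscape in the style of <my-favorite-style> -s30
```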
Another popular site for community-contributed models is
[CIVITAI](https://civitai.com). This extensive site currently supports
only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.
!!! note
InvokeAI 2.3.x does not support directly importing and
running Stable Diffusion version 2 checkpoint models. If you
try to import them, they will be automatically
converted into `diffusers` models on the fly. This adds about 20s
to loading time. To avoid this overhead, you are encouraged to
use one of the conversion methods described below to convert them
permanently.
[This page](https://rentry.org/sdmodels) hosts a large list of official and
unofficial Stable Diffusion models and where they can be obtained.
## Installation
There are multiple ways to install and manage models:
There are three ways to install weights files:
1. The `invokeai-model-install` script which will download and install them for you.
1. During InvokeAI installation, the `invokeai-configure` script can download
them for you.
2. The command-line tool (CLI) has commands that allow you to import, configure and modify
model files.
2. You can use the command-line interface (CLI) to import, configure and modify
new model files.
3. The web interface (WebUI) has a GUI for importing and managing
models.
3. You can download the files manually and add the appropriate entries to
`models.yaml`.
### Installation via `invokeai-model-install`

From the `invoke` launcher, choose option (5) "Download and install
models." This will launch the same script that prompted you to select
models at install time. You can use this to add models that you
skipped the first time around. It is all right to specify a model that
was previously downloaded; the script will just confirm that the files
are complete.

You can also run the same selection process directly from the console
with `invokeai-configure`, launched from within the InvokeAI root
directory. It will ask you to select which models to download and lead
you through the steps of setting up a Hugging Face account if you
haven't done so already.

The script also allows you to load third-party models. Look for a large text
entry box labeled "IMPORT LOCAL AND REMOTE MODELS." In this box, you
can cut and paste one or more of any of the following:

1. A URL that points to a downloadable .ckpt or .safetensors file.
2. A file path pointing to a .ckpt or .safetensors file.
3. A diffusers model repo_id (from HuggingFace) in the format
   "owner/repo_name".
4. A directory path pointing to a diffusers model directory.
5. A directory path pointing to a directory containing a bunch of
   .ckpt and .safetensors files. All will be imported.

You can enter multiple items into the textbox, each one on a separate
line. You can paste into the textbox using ctrl-shift-V or by dragging
and dropping a file/directory from the desktop into the box.

The script also lets you designate a directory that will be scanned
for new model files each time InvokeAI starts up. These models will be
added automatically.

Lastly, the script gives you a checkbox option to convert legacy models
into diffusers, or to run the legacy model directly. If you choose to
convert, the original .ckpt/.safetensors file will **not** be deleted,
but a new diffusers directory will be created, using twice your disk
space. However, the diffusers version will load faster, and will be
compatible with InvokeAI 3.0.

!!! example ""

    A typical interactive session looks like this:

    ```text
    Loading Python libraries...

    ** INTRODUCTION **
    Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
    and other large models that are needed for text to image generation. At any point you may interrupt
    this program and resume later.

    ** WEIGHT SELECTION **
    Would you like to download the Stable Diffusion model weights now? [y]

    Choose the weight file(s) you wish to download. Before downloading you
    will be given the option to view and change your selections.

    [1] stable-diffusion-1.5:
        The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
        Download? [y]
    [2] inpainting-1.5:
        RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
        Download? [y]
    [3] stable-diffusion-1.4:
        The original Stable Diffusion version 1.4 weight file (4.27 GB)
        Download? [n] n
    [4] waifu-diffusion-1.3:
        Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
        Download? [n] y
    [5] ft-mse-improved-autoencoder-840000:
        StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
        Download? [y] y

    The following weight files will be downloaded:
       [1] stable-diffusion-1.5*
       [2] inpainting-1.5
       [4] waifu-diffusion-1.3
       [5] ft-mse-improved-autoencoder-840000
    *default

    Ok to download? [y]

    ** LICENSE AGREEMENT FOR WEIGHT FILES **

    1. To download the Stable Diffusion weight files you need to read and accept the
       CreativeML Responsible AI license. If you have not already done so, please
       create an account using the "Sign Up" button:

       https://huggingface.co

       You will need to verify your email address as part of the HuggingFace
       registration process.

    2. After creating the account, login under your account and accept
       the license terms located here:

       https://huggingface.co/CompVis/stable-diffusion-v-1-4-original

    Press <enter> when you are ready to continue:
    ...
    ```

When the script is complete, you will find the downloaded weights files in
`models/ldm/stable-diffusion-v1` and a matching configuration file in
`configs/models.yaml`.

You can run the script again to add any models you didn't select the first
time. Note that as a safety measure the script will _never_ remove a
previously-installed weights file. You will have to do this manually.
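If you do need to remove a model by hand, a minimal approach (the model name and file path below are illustrative) is to delete its entry from within the CLI and then remove the weights file yourself:

```bash
# remove the entry from models.yaml
invoke> !del_model arabian-nights-1.0

# then, from a regular shell, delete the weights file to reclaim disk space
# (check models.yaml for the actual location of the file on your system)
rm models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
```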
### Installation via the CLI
You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.
#### Installing individual `.ckpt` and `.safetensors` models
1. First download the desired model weights file and place it somewhere on
   your machine's filesystem, for example under
   `models/ldm/stable-diffusion-v1/`. You may rename the weights file to
   something more memorable if you wish. Record the path of the weights file
   (e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`).

2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.

3. At the `invoke>` command-line, enter the command
   `!import_model <path to model>`. For example:

   ```bash
   invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
   ```

   !!! tip "Forward Slashes"
       On Windows systems, use forward slashes rather than backslashes
       in your file paths. If you do use backslashes, you must double
       them like this:
       `C:\\Users\\fred\\Downloads\\martians.safetensors`

Alternatively you can directly import the file using its URL:
```bash
invoke> !import_model https://example.org/sd_models/martians.safetensors
```
For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.
When you import a legacy model, the CLI will try to figure out what
type of model it is and select the correct load configuration file.
However, one thing it can't do is to distinguish between Stable
Diffusion 2.x models trained on 512x512 vs 768x768 images. In this
case, the CLI will pop up a menu of choices, asking you to select
which type of model it is. Please consult the model documentation to
identify the correct answer, as loading with the wrong configuration
will lead to black images. You can correct the model type after the
fact using the `!edit_model` command.
After importing, the model will load. If this is successful, you will
be asked if you want to keep the model loaded in memory to start
generating immediately. You'll also be asked if you wish to make this
the default model on startup. You can change this later using
`!edit_model`.
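For instance, to revisit those choices later, pass the model's short name to `!edit_model` (the name here is illustrative):

```bash
invoke> !edit_model arabian-nights-1.0
```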
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory
You may also point `!import_model` to a directory containing a set of
`.ckpt` or `.safetensors` files. They will be imported _en masse_.
!!! example
```console
invoke> !import_model C:/Users/fred/Downloads/civitai_models/
```
You will be given the option to import all models found in the
directory, or select which ones to import. If there are subfolders
within the directory, they will be searched for models to import.
#### Installing `diffusers` models
You can install a `diffusers` model from the HuggingFace site using
`!import_model` and the HuggingFace repo_id for the model:
```bash
invoke> !import_model andite/anything-v4.0
```
Alternatively, you can download the model to disk and import it from
there. The model may be distributed as a ZIP file, or as a Git
repository:
```bash
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
```
!!! tip "The CLI supports file path autocompletion"
Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.
!!! tip "On Windows, you can drag model files onto the command-line"
Once you have typed in `!import_model `, you can drag the
model file or directory onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste. However, you will need to reverse or
double backslashes as noted above.
!!! tip "on Windows, you can drag model files onto the command-line"
Before installing, the CLI will ask you for a short name and
description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".
Once you have typed in `!import_model `, you can drag the model `.ckpt` file
onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste.
Follow the wizard's instructions to complete installation, as shown in the
example here:

!!! example ""

    ```text
    invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
    >> Model import in process. Please enter the values needed to configure this model:

    Name for this model: arabian-nights
    Description of this model: Arabian Nights Fine Tune v1.0
    Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
    Default image width: 512
    Default image height: 512
    >> New configuration:
    arabian-nights:
      config: configs/stable-diffusion/v1-inference.yaml
      description: Arabian Nights Fine Tune v1.0
      height: 512
      weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
      width: 512
    OK to import [n]? y
    >> Caching model stable-diffusion-1.4 in system RAM
    >> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
       | LatentDiffusion: Running in eps-prediction mode
       | DiffusionWrapper has 859.52 M params.
       | Making attention of type 'vanilla' with 512 in_channels
       | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
       | Making attention of type 'vanilla' with 512 in_channels
       | Using faster float16 precision
    ```

If you've previously installed the fine-tune VAE file
`vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you want to
add this VAE to the model.
The appropriate entry for this model will be added to `configs/models.yaml` and
it will be available to use in the CLI immediately.

The CLI has additional commands for switching among, viewing, editing, deleting
the available models. These are described in
[Command Line Client](../features/CLI.md#model-selection-and-importation), but
the two most frequently-used are `!models` and `!switch <name of model>`. The
first prints a table of models that InvokeAI knows about and their load status.
The second will load the requested model and lets you switch back and forth
quickly among loaded models.
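For example, a quick session at the prompt might look like this (the model name is illustrative; use any name listed by `!models`):

```bash
# list the installed models and their load status
invoke> !models
# load a different model and make it the active one
invoke> !switch inpainting-1.5
```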
### Installation via the WebUI

To access the WebUI Model Manager, click on the button that looks like
a cube in the upper right side of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:

<figure markdown>

![model-manager](../assets/installing-models/webui-models-1.png)

</figure>

To add a new model, click on **+ Add New** and select either a
checkpoint/safetensors model, or a diffusers model:

<figure markdown>

![model-manager-add-new](../assets/installing-models/webui-models-2.png)

</figure>
In this example, we chose **Add Diffusers**. As shown in the figure
below, a new dialogue prompts you to enter the name to use for the
model, its description, and either the location of the `diffusers`
model on disk, or its Repo ID on the HuggingFace web site. If you
choose to enter a path to disk, the system will autocomplete for you
as you type:
<figure markdown>
![model-manager-add-diffusers](../assets/installing-models/webui-models-3.png)
</figure>
Press **Add Model** at the bottom of the dialogue (scrolled out of
sight in the figure), and the model will be downloaded, imported, and
registered in `models.yaml`.
The **Add Checkpoint/Safetensor Model** option is similar, except that
in this case you can choose to scan an entire folder for
checkpoint/safetensors files to import. Simply type in the path of the
directory and press the "Search" icon. This will display the
`.ckpt` and `.safetensors` files found inside the directory and its
subfolders, and allow you to choose which ones to import:
<figure markdown>
![model-manager-add-checkpoint](../assets/installing-models/webui-models-4.png)
</figure>
## Model Management Startup Options
The `invoke` launcher and the `invokeai` script accept a series of
command-line arguments that modify InvokeAI's behavior when loading
models. These can be provided on the command line, or added to the
InvokeAI root directory's `invokeai.init` initialization file.
The arguments are:
* `--model <model name>` -- Start up with the indicated model loaded
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.
Here is an example of providing an argument on the command line using
the `invoke.sh` launch script:
```bash
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
```
And here is what the same argument looks like in `invokeai.init`:
```bash
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
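For example, to start up with a particular model already loaded and to have legacy checkpoints converted in memory, you might combine options like this (the model name must match an entry in your `models.yaml`):

```bash
invoke.sh --model stable-diffusion-1.5 --ckpt_convert
```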
### Specifying a configuration file for legacy checkpoints
Some checkpoint files come with instructions to use a specific .yaml
configuration file. For InvokeAI to load this file correctly, please put
the config file in the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Here is an example:
```bash
wonderful-model-v2.ckpt
wonderful-model-v2.yaml
```
This is not needed for `diffusers` models, which come with their own
pre-packaged configuration.
### Specifying a custom VAE file for legacy checkpoints
To associate a custom VAE with a legacy file, place the VAE file in
the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Use the suffix `.vae.pt` for VAE checkpoint files, and
`.vae.safetensors` for VAE safetensors files. There is no requirement
that both the model and the VAE follow the same format.
Example:
```bash
wonderful-model-v2.ckpt
wonderful-model-v2.vae.safetensors
```
### Converting legacy models into `diffusers`
The CLI `!convert_model` command will convert a `.safetensors` or `.ckpt`
model file into `diffusers` format and install it. This enables the model
to load and run faster without loss of image quality.
The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:
```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```
After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.
### Optimizing a previously-installed model
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:
```bash
invoke> !optimize_model martians-v1.0
```
The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.
Alternatively you can use the WebUI's model manager to handle diffusers
optimization. Select the legacy model you wish to convert, and then
look for a button labeled "Convert to Diffusers" in the upper right of
the window.
### Related CLI Commands
There is a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:
* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model
### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.

You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem (for example under
`models/ldm/stable-diffusion-v1`), and record its path. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza for the model.

#### A legacy model

A legacy `.ckpt` or `.safetensors` entry will look like this:
```yaml
arabian-nights-1.0:
description: A great fine-tune in Arabian Nights style
weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
config: ./configs/stable-diffusion/v1-inference.yaml
format: ckpt
width: 512
height: 512
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
default: false
```
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
#### A diffusers model
A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
repo_id: captahab/arabian-nights-1.1
format: diffusers
default: true
```
And for a downloaded directory:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
path: /path/to/captahab-arabian-nights-1.1
format: diffusers
default: true
```
There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
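As a rough sketch, a diffusers stanza with an external VAE might look something like the following. The keys mirror the pattern used in `INITIAL_MODELS.yaml`, and the VAE repo_id shown is the StabilityAI fine-tuned autoencoder mentioned earlier; treat the exact layout as illustrative rather than authoritative:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  default: true
```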
The fields in a stanza have the following meanings:
| name | description |
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `invokeai-configure` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
Save the `models.yaml` and relaunch InvokeAI. The new model should now be
available for your use.

View File

@@ -3,19 +3,7 @@ title: Overview
---
We offer several ways to install InvokeAI, each one suited to your
experience and preferences. We suggest that everyone start by
reviewing the
[hardware](010_INSTALL_AUTOMATED.md#hardware_requirements) and
[software](010_INSTALL_AUTOMATED.md#software_requirements)
requirements, as they are the same across each install method. Then
pick the install method most suitable to your level of experience and
needs.
See the [troubleshooting
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
install guide for frequently-encountered installation issues.
## Main Application
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
@@ -30,8 +18,8 @@ install guide for frequently-encountered installation issues.
InvokeAI and its dependencies. We offer two recipes: one suited to
those who prefer the `conda` tool, and one suited to those who prefer
`pip` and Python virtual environments. In our hands the pip install
is faster and more reliable, but your mileage may vary.
Note that the conda installation method is currently deprecated and
will not be supported at some point in the future.
This method is recommended for users who have previously used `conda`
@@ -45,10 +33,3 @@ install guide for frequently-encountered installation issues.
InvokeAI and its dependencies. This method is recommended for
individuals with experience with Docker containers and understand
the pluses and minuses of a container-based install.
## Quick Guides
* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
* [Installing XFormers](./070_INSTALL_XFORMERS.md)
* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
* [Installing New Models](./050_INSTALLING_MODELS.md)

73
docs/openapi3_0.yaml Normal file
View File

@@ -0,0 +1,73 @@
openapi: 3.0.3
info:
  title: Stable Diffusion
  description: |-
    TODO: Description Here

    Some useful links:
    - [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
  license:
    name: MIT License
    url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
  version: 1.0.0
servers:
  - url: http://localhost:9090/api
tags:
  - name: images
    description: Retrieve and manage generated images
paths:
  /images/{imageId}:
    get:
      tags:
        - images
      summary: Get image by ID
      description: Returns a single image
      operationId: getImageById
      parameters:
        - name: imageId
          in: path
          description: ID of image to return
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Image not found
  /intermediates/{intermediateId}/{step}:
    get:
      tags:
        - images
      summary: Get intermediate image by ID
      description: Returns a single intermediate image
      operationId: getIntermediateById
      parameters:
        - name: intermediateId
          in: path
          description: ID of intermediate to return
          required: true
          schema:
            type: string
        - name: step
          in: path
          description: The generation step of the intermediate
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Intermediate not found
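Assuming the server is running locally on the port shown in the `servers` block above, a client could retrieve a finished image or an in-progress intermediate with plain HTTP GET requests. The IDs below are placeholders:

```bash
# fetch a completed image by its ID and save it as a PNG
curl -o result.png "http://localhost:9090/api/images/<imageId>"

# fetch the intermediate image produced at generation step 10
curl -o step-10.png "http://localhost:9090/api/intermediates/<intermediateId>/10"
```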

View File

@@ -23,16 +23,12 @@ We thank them for all of their time and hard work.
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Model training and merging
* @gogurtenjoyer - User support and testing
* @whosawwhatsis - User support and testing
## **Contributions by**
- [tildebyte](https://github.com/tildebyte)
- [Sean McLellan](https://github.com/Oceanswave)
- [Kevin Gibbons](https://github.com/bakkot)
- [Tesseract Cat](https://github.com/TesseractCat)
@@ -80,7 +76,6 @@ We thank them for all of their time and hard work.
- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
- [Eugene Brodsky](https://github.com/ebr)
- [Statcomm](https://github.com/statcomm)
## **Original CompVis Authors**

View File

@@ -1,19 +0,0 @@
# Translation
InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.
## Contributing
If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).
Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.
Your changes will be attributed to you in the automated PR process; you don't need to do anything else.
## Help & Questions
Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.
## Thanks
Thanks to the InvokeAI community for their efforts to translate the project!

View File

@@ -0,0 +1,5 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0

Binary file not shown.

After

Width:  |  Height:  |  Size: 665 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 628 B

16
docs/swagger-ui/index.css Normal file
View File

@@ -0,0 +1,16 @@
html {
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after {
box-sizing: inherit;
}
body {
margin: 0;
background: #fafafa;
}

View File

@@ -0,0 +1,79 @@
<!doctype html>
<html lang="en-US">
<head>
<title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
'use strict';
function run () {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1).replace('?', '&');
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "["+qp.error+"]: " +
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: "+qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
});
}
} else {
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
}
window.close();
}
if (document.readyState !== 'loading') {
run();
} else {
document.addEventListener('DOMContentLoaded', function () {
run();
});
}
</script>
</body>
</html>

View File

@@ -0,0 +1,20 @@
window.onload = function() {
//<editor-fold desc="Changeable Configuration Block">
// the following lines will be replaced by docker/configurator, when it runs in a docker-container
window.ui = SwaggerUIBundle({
url: "openapi3_0.yaml",
dom_id: '#swagger-ui',
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
});
//</editor-fold>
};

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -14,25 +14,22 @@ fi
VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
PATCH=""
VERSION="v${VERSION}${PATCH}"
LATEST_TAG="v2.3-latest"
echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
read -e -p "Commit and tag this repo with ${VERSION} and 'latest-2.3'? [n]: " input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then
git commit -a
if ! git tag $VERSION ; then
echo "Existing/invalid tag"
exit -1
fi
git push origin :refs/tags/$LATEST_TAG
git tag -fa $LATEST_TAG
echo "remember to push --tags!"
git push origin :refs/tags/latest-2.3
git tag -fa latest-2.3
fi
# ----------------------
@@ -57,12 +54,12 @@ rm -rf InvokeAI-Installer
# copy content
mkdir InvokeAI-Installer
for f in templates lib *.txt *.reg; do
for f in templates *.py *.txt *.reg; do
cp -r ${f} InvokeAI-Installer/
done
# Move the wheel
mv dist/*.whl InvokeAI-Installer/lib/
mv dist/*.whl InvokeAI-Installer/
# Install scripts
# Mac/Linux
@@ -76,6 +73,17 @@ cp WinLongPathsEnabled.reg InvokeAI-Installer/
# Zip everything up
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer
# Updater
mkdir tmp
cp templates/update.sh.in tmp/update.sh
cp templates/update.bat.in tmp/update.bat
chmod +x tmp/update.sh
chmod +x tmp/update.bat
cd tmp
zip InvokeAI-updater-$VERSION.zip update.sh update.bat
cd ..
mv tmp/InvokeAI-updater-$VERSION.zip .
# clean up
rm -rf InvokeAI-Installer tmp dist

View File

@@ -66,9 +66,8 @@ del /q .tmp1 .tmp2
@rem -------------- Install and Configure ---------------
call python .\lib\main.py
pause
exit /b
call python main.py
@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers

29
installer/install.sh.in Executable file → Normal file
View File

@@ -3,32 +3,5 @@
# make sure we are not already in a venv
# (don't need to check status)
deactivate >/dev/null 2>&1
scriptdir=$(dirname "$0")
cd $scriptdir
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
MINIMUM_PYTHON_VERSION=3.9.0
MAXIMUM_PYTHON_VERSION=3.11.0
PYTHON=""
for candidate in python3.10 python3.9 python3 python ; do
if ppath=`which $candidate`; then
python_version=$($ppath -V | awk '{ print $2 }')
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
PYTHON=$ppath
break
fi
fi
fi
done
if [ -z "$PYTHON" ]; then
echo "A suitable Python interpreter could not be found"
echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
read -p "Press any key to exit"
exit -1
fi
exec $PYTHON ./lib/main.py ${@}
read -p "Press any key to exit"
exec python3 $(dirname $0)/main.py ${@}

View File

@@ -38,8 +38,7 @@ class Installer:
self.reqs = INSTALLER_REQS
self.preflight()
if os.getenv("VIRTUAL_ENV") is not None:
print("A virtual environment is already activated. Please 'deactivate' before installation.")
sys.exit(-1)
raise NotImplementedError("A virtual environment is already activated. Please 'deactivate' before installation.")
self.bootstrap()
def preflight(self) -> None:
@@ -144,8 +143,8 @@ class Installer:
from plumbum import FG, local
python = local[get_python_from_venv(venv_dir)]
python[ "-m", "pip", "install", "--upgrade", "pip"] & FG
pip = local[get_pip_from_venv(venv_dir)]
pip[ "install", "--upgrade", "pip"] & FG
return venv_dir
@@ -241,19 +240,14 @@ class InvokeAiInstance:
from plumbum import FG, local
# Note that we're installing pinned versions of torch and
# torchvision here, which *should* correspond to what is
# in pyproject.toml. This is to prevent torch 2.0 from
# being installed and immediately uninstalled and replaced with 1.13
pip = local[self.pip]
(
pip[
"install",
"--require-virtualenv",
"torch~=1.13.1",
"torchvision~=0.14.1",
"--force-reinstall",
"torch",
"torchvision",
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
@@ -289,7 +283,7 @@ class InvokeAiInstance:
if FF_USE_LOCAL_WHEEL:
# if no wheel, try to do a source install before giving up
try:
src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
src = str(next(Path.cwd().glob("InvokeAI-*.whl")))
except StopIteration:
try:
src = Path(__file__).parents[1].expanduser().resolve()
@@ -330,7 +324,6 @@ class InvokeAiInstance:
Configure the InvokeAI runtime directory
"""
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1,len(sys.argv)):
el = sys.argv[i]
@@ -340,8 +333,7 @@ class InvokeAiInstance:
elif el in ['-y','--yes','--yes-to-all']:
new_argv.append(el)
sys.argv = new_argv
import requests # to catch download exceptions
from messages import introduction
introduction()
@@ -351,21 +343,10 @@ class InvokeAiInstance:
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script.
# this may change in the future with config refactoring!
succeeded = False
try:
invokeai_configure.main()
succeeded = True
except requests.exceptions.ConnectionError as e:
print(f'\nA network error was encountered during configuration and download: {str(e)}')
except OSError as e:
print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
except Exception as e:
print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
finally:
if not succeeded:
print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
print('Alternatively you can relaunch the installer.')
# set sys.argv to a consistent state
invokeai_configure.main()
def install_user_scripts(self):
"""
@@ -378,14 +359,11 @@ class InvokeAiInstance:
scripts = ['invoke']
for script in scripts:
src = Path(__file__).parent / '..' / "templates" / f"{script}.{ext}.in"
src = Path(__file__).parent / "templates" / f"{script}.{ext}.in"
dest = self.runtime / f"{script}.{ext}"
shutil.copy(src, dest)
os.chmod(dest, 0o0755)
if OS == "Linux":
shutil.copy(Path(__file__).parents[1] / "templates" / "dialogrc", self.runtime / '.dialogrc')
def update(self):
pass
@@ -412,22 +390,6 @@ def get_pip_from_venv(venv_path: Path) -> str:
return str(venv_path.expanduser().resolve() / pip)
def get_python_from_venv(venv_path: Path) -> str:
"""
Given a path to a virtual environment, get the absolute path to the `python` executable
in a cross-platform fashion. Does not validate that the python executable
actually exists in the virtualenv.
:param venv_path: Path to the virtual environment
:type venv_path: Path
:return: Absolute path to the python executable
:rtype: str
"""
python = "Scripts\python.exe" if OS == "Windows" else "bin/python"
return str(venv_path.expanduser().resolve() / python)
def set_sys_path(venv_path: Path) -> None:
"""
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
@@ -484,7 +446,6 @@ def get_torch_source() -> (Union[str, None],str):
url = "https://download.pytorch.org/whl/cpu"
if device == 'cuda':
url = 'https://download.pytorch.org/whl/cu117'
optional_modules = '[xformers]'
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13

View File

@@ -9,9 +9,10 @@ from pathlib import Path
from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group, group
from rich.console import Console, Group
from rich.panel import Panel
from rich.prompt import Confirm
from rich.style import Style
@@ -36,21 +37,17 @@ else:
def welcome():
@group()
def text():
if (platform_specific := _platform_specific_help()) != "":
yield platform_specific
yield ""
yield Text.from_markup("Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", justify="center")
console.rule()
print(
Panel(
title="[bold wheat1]Welcome to the InvokeAI Installer",
renderable=text(),
renderable=Text(
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry.",
justify="center",
),
box=box.DOUBLE,
expand=True,
width=80,
expand=False,
padding=(1, 2),
style=Style(bgcolor="grey23", color="orange1"),
subtitle=f"[bold grey39]{OS}-{ARCH}",
@@ -203,7 +200,7 @@ def graphical_accelerator():
[
f"Detected the [gold1]{OS}-{ARCH}[/] platform",
"",
"See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"See [steel_blue3]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"",
"[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
]
@@ -297,16 +294,3 @@ def introduction() -> None:
)
)
console.line(2)
def _platform_specific_help()->str:
if OS == "Darwin":
text = Text.from_markup("""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""")
elif OS == "Windows":
text = Text.from_markup("""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
enable long path support on your system.
2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""")
else:
text = ""
return text

View File

@@ -1,27 +0,0 @@
# Screen
use_shadow = OFF
use_colors = ON
screen_color = (BLACK, BLACK, ON)
# Box
dialog_color = (YELLOW, BLACK , ON)
title_color = (YELLOW, BLACK, ON)
border_color = (YELLOW, BLACK, OFF)
border2_color = (YELLOW, BLACK, OFF)
# Button
button_active_color = (RED, BLACK, OFF)
button_inactive_color = (YELLOW, BLACK, OFF)
button_label_active_color = (YELLOW,BLACK,ON)
button_label_inactive_color = (YELLOW,BLACK,ON)
# Menu box
menubox_color = (BLACK, BLACK, ON)
menubox_border_color = (YELLOW, BLACK, OFF)
menubox_border2_color = (YELLOW, BLACK, OFF)
# Menu window
item_color = (YELLOW, BLACK, OFF)
item_selected_color = (BLACK, YELLOW, OFF)
tag_key_color = (YELLOW, BLACK, OFF)
tag_key_selected_color = (BLACK, YELLOW, OFF)

View File

@@ -6,20 +6,14 @@ setlocal
call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.
:start
echo Do you want to generate images using the
echo 1. command-line interface
echo 1. command-line
echo 2. browser-based UI
echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
echo 5. download and install models
echo 6. change InvokeAI startup options
echo 7. re-run the configure script to fix a broken install
echo 8. open the developer console
echo 9. update InvokeAI
echo 10. command-line help
echo Q - quit
set /P restore="Please enter 1-10, Q: [2] "
echo 5. re-run the configure script to download new models
echo 6. open the developer console
set /P restore="Please enter 1, 2, 3, 4 or 5: [5] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line..
@@ -29,20 +23,14 @@ IF /I "%restore%" == "1" (
python .venv\Scripts\invokeai.exe --web %*
) ELSE IF /I "%restore%" == "3" (
echo Starting textual inversion training..
python .venv\Scripts\invokeai-ti.exe --gui
python .venv\Scripts\invokeai-ti.exe --gui %*
) ELSE IF /I "%restore%" == "4" (
echo Starting model merging script..
python .venv\Scripts\invokeai-merge.exe --gui
python .venv\Scripts\invokeai-merge.exe --gui %*
) ELSE IF /I "%restore%" == "5" (
echo Running invokeai-model-install...
python .venv\Scripts\invokeai-model-install.exe
echo Running invokeai-configure...
python .venv\Scripts\invokeai-configure.exe %*
) ELSE IF /I "%restore%" == "6" (
echo Running invokeai-configure...
python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
) ELSE IF /I "%restore%" == "7" (
echo Running invokeai-configure...
python .venv\Scripts\invokeai-configure.exe --yes --default_only
) ELSE IF /I "%restore%" == "8" (
echo Developer Console
echo Python command is:
where python
@@ -54,27 +42,9 @@ IF /I "%restore%" == "1" (
echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k
) ELSE IF /I "%restore%" == "9" (
echo Running invokeai-update...
python .venv\Scripts\invokeai-update.exe %*
) ELSE IF /I "%restore%" == "10" (
echo Displaying command line help...
python .venv\Scripts\invokeai.exe --help %*
pause
exit /b
) ELSE IF /I "%restore%" == "q" (
echo Goodbye!
goto ending
) ELSE (
echo Invalid selection
pause
exit /b
)
goto start
endlocal
pause
:ending
exit /b

Some files were not shown because too many files have changed in this diff Show More