Compare commits
205 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9a1fe8e7fb | ||
|
|
ff56f5251b | ||
|
|
ed943bd6c7 | ||
|
|
7ad2355b1d | ||
|
|
66c920fc19 | ||
|
|
3fc5cb09f8 | ||
|
|
1345ec77ab | ||
|
|
b116715490 | ||
|
|
fa3670270e | ||
|
|
c304250ef6 | ||
|
|
802ce5dde5 | ||
|
|
311ee320ec | ||
|
|
e9df17b374 | ||
|
|
061fb4ef00 | ||
|
|
52be0d2396 | ||
|
|
4095acd10e | ||
|
|
201eb22d76 | ||
|
|
17ab982200 | ||
|
|
a04965b0e9 | ||
|
|
0b529f0c57 | ||
|
|
6f9f848345 | ||
|
|
918c1589ef | ||
|
|
116415b3fc | ||
|
|
b4b6eabaac | ||
|
|
4ef1f4a854 | ||
|
|
510fc4ebaa | ||
|
|
a20914434b | ||
|
|
0d134195fd | ||
|
|
649d8c8573 | ||
|
|
a358d370a0 | ||
|
|
94a9033c4f | ||
|
|
18a947c503 | ||
|
|
a23b031895 | ||
|
|
23af68c7d7 | ||
|
|
e258beeb51 | ||
|
|
7460c069b8 | ||
|
|
e481bfac61 | ||
|
|
5040747c67 | ||
|
|
d1ab65a431 | ||
|
|
af4ee7feb8 | ||
|
|
764fb29ade | ||
|
|
1014d3ba44 | ||
|
|
40a48aca88 | ||
|
|
92abc00f16 | ||
|
|
a5719aabf8 | ||
|
|
44a18511fa | ||
|
|
b850dbadaf | ||
|
|
9ef8b944d5 | ||
|
|
efc5a98488 | ||
|
|
1417c87928 | ||
|
|
2dd6fc2b93 | ||
|
|
22213612a0 | ||
|
|
71ee44a827 | ||
|
|
b17ca0a5e7 | ||
|
|
71bbfe4a1a | ||
|
|
5702271991 | ||
|
|
10781e7dc4 | ||
|
|
099d1157c5 | ||
|
|
ab825bf7ee | ||
|
|
10cfeb5ada | ||
|
|
e97515d045 | ||
|
|
0f04bc5789 | ||
|
|
3f74aabecd | ||
|
|
b1a99a51b7 | ||
|
|
8004f8a6d9 | ||
|
|
ff8ff2212a | ||
|
|
8e5363cd83 | ||
|
|
1450779146 | ||
|
|
8cd5d95b8a | ||
|
|
abd6407394 | ||
|
|
734dacfbe9 | ||
|
|
636620b1d5 | ||
|
|
1fe41146f0 | ||
|
|
2ad6ef355a | ||
|
|
865502ee4f | ||
|
|
c7984f3299 | ||
|
|
7f150ed833 | ||
|
|
badf4e256c | ||
|
|
e64c60bbb3 | ||
|
|
1780618543 | ||
|
|
f91fd27624 | ||
|
|
09e41e8f76 | ||
|
|
6eeb2107b3 | ||
|
|
17053ad8b7 | ||
|
|
fefb4dc1f8 | ||
|
|
d05b1b3544 | ||
|
|
82d4904c07 | ||
|
|
1cdcf33cfa | ||
|
|
6616fa835a | ||
|
|
7b9a4564b1 | ||
|
|
fcdefa0620 | ||
|
|
ef8b3ce639 | ||
|
|
36870a8f53 | ||
|
|
b70420951d | ||
|
|
1f0c5b4cf1 | ||
|
|
8648da8111 | ||
|
|
45b4593563 | ||
|
|
41b04316cf | ||
|
|
e97c6db2a3 | ||
|
|
896820a349 | ||
|
|
06c8f468bf | ||
|
|
61920e2701 | ||
|
|
f34ba7ca70 | ||
|
|
c30ef0895d | ||
|
|
aa3a774f73 | ||
|
|
2c30555b84 | ||
|
|
743f605773 | ||
|
|
519c661abb | ||
|
|
22c956c75f | ||
|
|
13696adc3a | ||
|
|
0196571a12 | ||
|
|
9666f466ab | ||
|
|
240e5486c8 | ||
|
|
8164b6b9cf | ||
|
|
4fc82d554f | ||
|
|
96b34c0f85 | ||
|
|
dd5a88dcee | ||
|
|
95ed56bf82 | ||
|
|
1ae80f5ab9 | ||
|
|
1f0bd3ca6c | ||
|
|
a1971f6830 | ||
|
|
c6118e8898 | ||
|
|
7ba958cf7f | ||
|
|
383905d5d2 | ||
|
|
6173e3e9ca | ||
|
|
3feb7d8922 | ||
|
|
1d9edbd0dd | ||
|
|
d439abdb89 | ||
|
|
ee47ea0c89 | ||
|
|
300bb2e627 | ||
|
|
ccf8593501 | ||
|
|
0fda612f3f | ||
|
|
5afff65b71 | ||
|
|
7e55bdefce | ||
|
|
620cf84d3d | ||
|
|
cfe567c62a | ||
|
|
cefe12f1df | ||
|
|
1e51c39928 | ||
|
|
42a02bbb80 | ||
|
|
f1ae6dae4c | ||
|
|
6195579910 | ||
|
|
16c8b23b34 | ||
|
|
07ae626b22 | ||
|
|
8d171bb044 | ||
|
|
6e33ca7e9e | ||
|
|
db46e12f2b | ||
|
|
868e4b2db8 | ||
|
|
2e562742c1 | ||
|
|
68e6958009 | ||
|
|
ea6e3a7949 | ||
|
|
b2879ca99f | ||
|
|
4e911566c3 | ||
|
|
9bafda6a15 | ||
|
|
871a8a5375 | ||
|
|
0eef74bc00 | ||
|
|
423ae32097 | ||
|
|
8282e5d045 | ||
|
|
19305cdbdf | ||
|
|
eb9028ab30 | ||
|
|
21483f5d07 | ||
|
|
82dcbac28f | ||
|
|
d43bd4625d | ||
|
|
ea891324a2 | ||
|
|
8fd9ea2193 | ||
|
|
fb02666856 | ||
|
|
f6f5c2731b | ||
|
|
b4e3f771e0 | ||
|
|
99bb9491ac | ||
|
|
0453f21127 | ||
|
|
9fc09aa4bd | ||
|
|
5e87062cf8 | ||
|
|
3e7a459990 | ||
|
|
bbf4c03e50 | ||
|
|
611a3a9753 | ||
|
|
1611f0d181 | ||
|
|
08835115e4 | ||
|
|
2d84e28d32 | ||
|
|
ef17aae8ab | ||
|
|
0cc39f01a3 | ||
|
|
688d7258f1 | ||
|
|
4513320bf1 | ||
|
|
533fd04ef0 | ||
|
|
dff5681cf0 | ||
|
|
5a2790a69b | ||
|
|
7c5305ccba | ||
|
|
4013e8ad6f | ||
|
|
d1dfd257f9 | ||
|
|
5322d735ee | ||
|
|
cdb107dcda | ||
|
|
be1393a41c | ||
|
|
e554c2607f | ||
|
|
6215592b12 | ||
|
|
349cc25433 | ||
|
|
214d276379 | ||
|
|
ef24d76adc | ||
|
|
ab2b5a691d | ||
|
|
c7de2b2801 | ||
|
|
e8075658ac | ||
|
|
4202dabee1 | ||
|
|
d67db2bcf1 | ||
|
|
7159ec885f | ||
|
|
b5cf734ba9 | ||
|
|
f7dc8eafee | ||
|
|
762ca60a30 | ||
|
|
e7fb9f342c |
@@ -1,19 +1,3 @@
|
|||||||
*
|
*
|
||||||
!backend
|
!environment*.yml
|
||||||
!environments-and-requirements
|
!docker-build
|
||||||
!frontend
|
|
||||||
!ldm
|
|
||||||
!main.py
|
|
||||||
!scripts
|
|
||||||
!server
|
|
||||||
!static
|
|
||||||
!setup.py
|
|
||||||
|
|
||||||
# Guard against pulling in any models that might exist in the directory tree
|
|
||||||
**/*.pt*
|
|
||||||
|
|
||||||
# unignore configs, but only ignore the custom models.yaml, in case it exists
|
|
||||||
!configs
|
|
||||||
configs/models.yaml
|
|
||||||
|
|
||||||
**/__pycache__
|
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
# All files
|
|
||||||
[*]
|
|
||||||
charset = utf-8
|
|
||||||
end_of_line = lf
|
|
||||||
indent_size = 2
|
|
||||||
indent_style = space
|
|
||||||
insert_final_newline = true
|
|
||||||
trim_trailing_whitespace = true
|
|
||||||
|
|
||||||
# Python
|
|
||||||
[*.py]
|
|
||||||
indent_size = 4
|
|
||||||
2
.gitattributes
vendored
@@ -1,4 +1,4 @@
|
|||||||
# Auto normalizes line endings on commit so devs don't need to change local settings.
|
# Auto normalizes line endings on commit so devs don't need to change local settings.
|
||||||
# Only affects text files and ignores other file types.
|
# Only affects text files and ignores other file types.
|
||||||
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
|
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
|
||||||
* text=auto
|
* text=auto
|
||||||
|
|||||||
2
.github/CODEOWNERS
vendored
@@ -3,5 +3,3 @@ ldm/invoke/server_legacy.py @CapableWeb
|
|||||||
scripts/legacy_api.py @CapableWeb
|
scripts/legacy_api.py @CapableWeb
|
||||||
tests/legacy_tests.sh @CapableWeb
|
tests/legacy_tests.sh @CapableWeb
|
||||||
installer/ @tildebyte
|
installer/ @tildebyte
|
||||||
.github/workflows/ @mauwii
|
|
||||||
docker_build/ @mauwii
|
|
||||||
|
|||||||
87
.github/workflows/build-cloud-img.yml
vendored
@@ -1,87 +0,0 @@
|
|||||||
name: Build and push cloud image
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
# push:
|
|
||||||
# branches:
|
|
||||||
# - main
|
|
||||||
# tags:
|
|
||||||
# - v*
|
|
||||||
# # we will NOT push the image on pull requests, only test buildability.
|
|
||||||
# pull_request:
|
|
||||||
# branches:
|
|
||||||
# - main
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
arch:
|
|
||||||
- x86_64
|
|
||||||
# requires resolving a patchmatch issue
|
|
||||||
# - aarch64
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: ${{ matrix.arch }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
if: matrix.arch == 'aarch64'
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
# see https://github.com/docker/metadata-action
|
|
||||||
# will push the following tags:
|
|
||||||
# :edge
|
|
||||||
# :main (+ any other branches enabled in the workflow)
|
|
||||||
# :<tag>
|
|
||||||
# :1.2.3 (for semver tags)
|
|
||||||
# :1.2 (for semver tags)
|
|
||||||
# :<sha>
|
|
||||||
tags: |
|
|
||||||
type=edge,branch=main
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=tag
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=sha
|
|
||||||
# suffix image tags with architecture
|
|
||||||
flavor: |
|
|
||||||
latest=auto
|
|
||||||
suffix=-${{ matrix.arch }},latest=true
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
|
|
||||||
# do not login to container registry on PRs
|
|
||||||
- if: github.event_name != 'pull_request'
|
|
||||||
name: Docker login
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push cloud image
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: docker-build/Dockerfile.cloud
|
|
||||||
platforms: Linux/${{ matrix.arch }}
|
|
||||||
# do not push the image on PRs
|
|
||||||
push: false
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
74
.github/workflows/build-container.yml
vendored
@@ -1,74 +1,48 @@
|
|||||||
|
# Building the Image without pushing to confirm it is still buildable
|
||||||
|
# confirum functionality would unfortunately need way more resources
|
||||||
name: build container image
|
name: build container image
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
|
- 'development'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
registry:
|
arch:
|
||||||
- ghcr.io
|
- x86_64
|
||||||
flavor:
|
- aarch64
|
||||||
- amd
|
|
||||||
- cuda
|
|
||||||
# - cloud
|
|
||||||
include:
|
include:
|
||||||
- flavor: amd
|
- arch: x86_64
|
||||||
pip-requirements: requirements-lin-amd.txt
|
conda-env-file: environment-lin-cuda.yml
|
||||||
dockerfile: docker-build/Dockerfile
|
- arch: aarch64
|
||||||
platforms: linux/amd64,linux/arm64
|
conda-env-file: environment-lin-aarch64.yml
|
||||||
- flavor: cuda
|
|
||||||
pip-requirements: requirements-lin-cuda.txt
|
|
||||||
dockerfile: docker-build/Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
# - flavor: cloud
|
|
||||||
# pip-requirements: requirements-lin-cuda.txt
|
|
||||||
# dockerfile: docker-build/Dockerfile.cloud
|
|
||||||
# platforms: linux/amd64
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: ${{ matrix.flavor }}
|
name: ${{ matrix.arch }}
|
||||||
steps:
|
steps:
|
||||||
|
- name: prepare docker-tag
|
||||||
|
env:
|
||||||
|
repository: ${{ github.repository }}
|
||||||
|
run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v2
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
|
|
||||||
tags: |
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=tag
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=sha
|
|
||||||
flavor: |
|
|
||||||
latest=true
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v2
|
||||||
|
|
||||||
- if: github.event_name != 'pull_request'
|
|
||||||
name: Docker login
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ${{ matrix.registry }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build container
|
- name: Build container
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v3
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ${{ matrix.dockerfile }}
|
file: docker-build/Dockerfile
|
||||||
platforms: ${{ matrix.platforms }}
|
platforms: Linux/${{ matrix.arch }}
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: false
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ env.dockertag }}:${{ matrix.arch }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
build-args: |
|
||||||
build-args: pip_requirements=${{ matrix.pip-requirements }}
|
conda_env_file=${{ matrix.conda-env-file }}
|
||||||
|
conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
|
||||||
|
invokeai_git=${{ github.repository }}
|
||||||
|
invokeai_branch=${{ github.ref_name }}
|
||||||
|
|||||||
28
.github/workflows/lint-frontend.yml
vendored
@@ -1,28 +0,0 @@
|
|||||||
name: Lint frontend
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'frontend/**'
|
|
||||||
push:
|
|
||||||
paths:
|
|
||||||
- 'frontend/**'
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: frontend
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint-frontend:
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
steps:
|
|
||||||
- name: Setup Node 18
|
|
||||||
uses: actions/setup-node@v3
|
|
||||||
with:
|
|
||||||
node-version: '18'
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
- run: 'yarn install --frozen-lockfile'
|
|
||||||
- run: 'yarn tsc'
|
|
||||||
- run: 'yarn run madge'
|
|
||||||
- run: 'yarn run lint --max-warnings=0'
|
|
||||||
- run: 'yarn run prettier --check'
|
|
||||||
2
.github/workflows/mkdocs-material.yml
vendored
@@ -22,7 +22,7 @@ jobs:
|
|||||||
- name: install requirements
|
- name: install requirements
|
||||||
run: |
|
run: |
|
||||||
python -m \
|
python -m \
|
||||||
pip install -r docs/requirements-mkdocs.txt
|
pip install -r requirements-mkdocs.txt
|
||||||
|
|
||||||
- name: confirm buildability
|
- name: confirm buildability
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
19
.github/workflows/pyflakes.yml
vendored
@@ -1,19 +0,0 @@
|
|||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
- development
|
|
||||||
- 'release-candidate-*'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
pyflakes:
|
|
||||||
name: runner / pyflakes
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: pyflakes
|
|
||||||
uses: reviewdog/action-pyflakes@v1
|
|
||||||
with:
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
reporter: github-pr-review
|
|
||||||
121
.github/workflows/test-invoke-conda.yml
vendored
@@ -3,68 +3,41 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
|
- 'development'
|
||||||
|
- 'fix-gh-actions-fork'
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
types:
|
- 'development'
|
||||||
- 'ready_for_review'
|
|
||||||
- 'opened'
|
|
||||||
- 'synchronize'
|
|
||||||
- 'converted_to_draft'
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
fail_if_pull_request_is_draft:
|
|
||||||
if: github.event.pull_request.draft == true
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
steps:
|
|
||||||
- name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
|
|
||||||
run: exit 1
|
|
||||||
|
|
||||||
matrix:
|
matrix:
|
||||||
if: github.event.pull_request.draft == false
|
|
||||||
strategy:
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
stable-diffusion-model:
|
stable-diffusion-model:
|
||||||
- 'stable-diffusion-1.5'
|
# - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
|
||||||
environment-yaml:
|
- 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
|
||||||
- environment-lin-amd.yml
|
os:
|
||||||
- environment-lin-cuda.yml
|
- ubuntu-latest
|
||||||
- environment-mac.yml
|
- macOS-12
|
||||||
- environment-win-cuda.yml
|
|
||||||
include:
|
include:
|
||||||
- environment-yaml: environment-lin-amd.yml
|
- os: ubuntu-latest
|
||||||
os: ubuntu-22.04
|
environment-file: environment-lin-cuda.yml
|
||||||
curl-command: curl
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
default-shell: bash -l {0}
|
default-shell: bash -l {0}
|
||||||
- environment-yaml: environment-lin-cuda.yml
|
- os: macOS-12
|
||||||
os: ubuntu-22.04
|
environment-file: environment-mac.yml
|
||||||
curl-command: curl
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
default-shell: bash -l {0}
|
default-shell: bash -l {0}
|
||||||
- environment-yaml: environment-mac.yml
|
# - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
|
||||||
os: macos-12
|
# stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
|
||||||
curl-command: curl
|
# stable-diffusion-model-switch: stable-diffusion-1.4
|
||||||
github-env: $GITHUB_ENV
|
- stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
||||||
default-shell: bash -l {0}
|
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
|
||||||
- environment-yaml: environment-win-cuda.yml
|
stable-diffusion-model-switch: stable-diffusion-1.5
|
||||||
os: windows-2022
|
name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
|
||||||
curl-command: curl.exe
|
|
||||||
github-env: $env:GITHUB_ENV
|
|
||||||
default-shell: pwsh
|
|
||||||
- stable-diffusion-model: stable-diffusion-1.5
|
|
||||||
stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
|
||||||
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
|
|
||||||
stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
|
|
||||||
name: ${{ matrix.environment-yaml }} on ${{ matrix.os }}
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
env:
|
env:
|
||||||
CONDA_ENV_NAME: invokeai
|
CONDA_ENV_NAME: invokeai
|
||||||
INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: ${{ matrix.default-shell }}
|
shell: ${{ matrix.default-shell }}
|
||||||
@@ -74,19 +47,17 @@ jobs:
|
|||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: create models.yaml from example
|
- name: create models.yaml from example
|
||||||
run: |
|
run: cp configs/models.yaml.example configs/models.yaml
|
||||||
mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
|
|
||||||
cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
|
|
||||||
|
|
||||||
- name: create environment.yml
|
- name: create environment.yml
|
||||||
run: cp "environments-and-requirements/${{ matrix.environment-yaml }}" environment.yml
|
run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
|
||||||
|
|
||||||
- name: Use cached conda packages
|
- name: Use cached conda packages
|
||||||
id: use-cached-conda-packages
|
id: use-cached-conda-packages
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: ~/conda_pkgs_dir
|
path: ~/conda_pkgs_dir
|
||||||
key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-yaml) }}
|
key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}
|
||||||
|
|
||||||
- name: Activate Conda Env
|
- name: Activate Conda Env
|
||||||
id: activate-conda-env
|
id: activate-conda-env
|
||||||
@@ -98,64 +69,58 @@ jobs:
|
|||||||
|
|
||||||
- name: set test prompt to main branch validation
|
- name: set test prompt to main branch validation
|
||||||
if: ${{ github.ref == 'refs/heads/main' }}
|
if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: set test prompt to development branch validation
|
- name: set test prompt to development branch validation
|
||||||
if: ${{ github.ref == 'refs/heads/development' }}
|
if: ${{ github.ref == 'refs/heads/development' }}
|
||||||
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: set test prompt to Pull Request validation
|
- name: set test prompt to Pull Request validation
|
||||||
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
|
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
|
||||||
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Use Cached Stable Diffusion Model
|
- name: Use Cached Stable Diffusion Model
|
||||||
id: cache-sd-model
|
id: cache-sd-model
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
env:
|
env:
|
||||||
cache-name: cache-${{ matrix.stable-diffusion-model }}
|
cache-name: cache-${{ matrix.stable-diffusion-model-switch }}
|
||||||
with:
|
with:
|
||||||
path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
|
path: ${{ matrix.stable-diffusion-model-dl-path }}
|
||||||
key: ${{ env.cache-name }}
|
key: ${{ env.cache-name }}
|
||||||
|
|
||||||
- name: Download ${{ matrix.stable-diffusion-model }}
|
- name: Download ${{ matrix.stable-diffusion-model-switch }}
|
||||||
id: download-stable-diffusion-model
|
id: download-stable-diffusion-model
|
||||||
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
|
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
|
||||||
run: |
|
run: |
|
||||||
mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
|
[[ -d models/ldm/stable-diffusion-v1 ]] \
|
||||||
${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
|
|| mkdir -p models/ldm/stable-diffusion-v1
|
||||||
|
curl \
|
||||||
|
-H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
|
||||||
|
-o ${{ matrix.stable-diffusion-model-dl-path }} \
|
||||||
|
-L ${{ matrix.stable-diffusion-model }}
|
||||||
|
|
||||||
- name: run configure_invokeai.py
|
- name: run preload_models.py
|
||||||
id: run-preload-models
|
id: run-preload-models
|
||||||
run: |
|
run: |
|
||||||
python scripts/configure_invokeai.py --skip-sd-weights --yes
|
python scripts/preload_models.py \
|
||||||
|
--no-interactive
|
||||||
- name: cat invokeai.init
|
|
||||||
id: cat-invokeai
|
|
||||||
run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init
|
|
||||||
|
|
||||||
- name: Run the tests
|
- name: Run the tests
|
||||||
id: run-tests
|
id: run-tests
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
run: |
|
run: |
|
||||||
time python scripts/invoke.py \
|
time python scripts/invoke.py \
|
||||||
--no-patchmatch \
|
--model ${{ matrix.stable-diffusion-model-switch }} \
|
||||||
--no-nsfw_checker \
|
--from_file ${{ env.TEST_PROMPTS }}
|
||||||
--model ${{ matrix.stable-diffusion-model }} \
|
|
||||||
--from_file ${{ env.TEST_PROMPTS }} \
|
|
||||||
--root="${{ env.INVOKEAI_ROOT }}" \
|
|
||||||
--outdir="${{ env.INVOKEAI_ROOT }}/outputs"
|
|
||||||
|
|
||||||
- name: export conda env
|
- name: export conda env
|
||||||
id: export-conda-env
|
id: export-conda-env
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
run: |
|
run: |
|
||||||
mkdir -p outputs/img-samples
|
mkdir -p outputs/img-samples
|
||||||
conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml
|
conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml
|
||||||
|
|
||||||
- name: Archive results
|
- name: Archive results
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
id: archive-results
|
id: archive-results
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
|
name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
|
||||||
path: ${{ env.INVOKEAI_ROOT }}/outputs
|
path: outputs/img-samples
|
||||||
|
|||||||
144
.github/workflows/test-invoke-pip.yml
vendored
@@ -1,144 +0,0 @@
|
|||||||
name: Test invoke.py pip
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'main'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- 'main'
|
|
||||||
types:
|
|
||||||
- 'ready_for_review'
|
|
||||||
- 'opened'
|
|
||||||
- 'synchronize'
|
|
||||||
- 'converted_to_draft'
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
fail_if_pull_request_is_draft:
|
|
||||||
if: github.event.pull_request.draft == true
|
|
||||||
runs-on: ubuntu-18.04
|
|
||||||
steps:
|
|
||||||
- name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
|
|
||||||
run: exit 1
|
|
||||||
matrix:
|
|
||||||
if: github.event.pull_request.draft == false
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
stable-diffusion-model:
|
|
||||||
- stable-diffusion-1.5
|
|
||||||
requirements-file:
|
|
||||||
- requirements-lin-cuda.txt
|
|
||||||
- requirements-lin-amd.txt
|
|
||||||
- requirements-mac-mps-cpu.txt
|
|
||||||
- requirements-win-colab-cuda.txt
|
|
||||||
python-version:
|
|
||||||
# - '3.9'
|
|
||||||
- '3.10'
|
|
||||||
include:
|
|
||||||
- requirements-file: requirements-lin-cuda.txt
|
|
||||||
os: ubuntu-22.04
|
|
||||||
curl-command: curl
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- requirements-file: requirements-lin-amd.txt
|
|
||||||
os: ubuntu-22.04
|
|
||||||
curl-command: curl
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- requirements-file: requirements-mac-mps-cpu.txt
|
|
||||||
os: macOS-12
|
|
||||||
curl-command: curl
|
|
||||||
github-env: $GITHUB_ENV
|
|
||||||
- requirements-file: requirements-win-colab-cuda.txt
|
|
||||||
os: windows-2022
|
|
||||||
curl-command: curl.exe
|
|
||||||
github-env: $env:GITHUB_ENV
|
|
||||||
- stable-diffusion-model: stable-diffusion-1.5
|
|
||||||
stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
|
||||||
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
|
|
||||||
stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
|
|
||||||
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout sources
|
|
||||||
id: checkout-sources
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: set INVOKEAI_ROOT Windows
|
|
||||||
if: matrix.os == 'windows-2022'
|
|
||||||
run: |
|
|
||||||
echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
|
|
||||||
echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: set INVOKEAI_ROOT others
|
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
run: |
|
|
||||||
echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
|
|
||||||
echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: create models.yaml from example
|
|
||||||
run: |
|
|
||||||
mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
|
|
||||||
cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
|
|
||||||
|
|
||||||
- name: set test prompt to main branch validation
|
|
||||||
if: ${{ github.ref == 'refs/heads/main' }}
|
|
||||||
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: set test prompt to development branch validation
|
|
||||||
if: ${{ github.ref == 'refs/heads/development' }}
|
|
||||||
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: set test prompt to Pull Request validation
|
|
||||||
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
|
|
||||||
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: create requirements.txt
|
|
||||||
run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
|
|
||||||
|
|
||||||
- name: setup python
|
|
||||||
uses: actions/setup-python@v4
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
# cache: 'pip'
|
|
||||||
# cache-dependency-path: ${{ matrix.requirements-file }}
|
|
||||||
|
|
||||||
- name: install dependencies
|
|
||||||
run: pip3 install --upgrade pip setuptools wheel
|
|
||||||
|
|
||||||
- name: install requirements
|
|
||||||
run: pip3 install -r '${{ matrix.requirements-file }}'
|
|
||||||
|
|
||||||
- name: Use Cached Stable Diffusion Model
|
|
||||||
id: cache-sd-model
|
|
||||||
uses: actions/cache@v3
|
|
||||||
env:
|
|
||||||
cache-name: cache-${{ matrix.stable-diffusion-model }}
|
|
||||||
with:
|
|
||||||
path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
|
|
||||||
key: ${{ env.cache-name }}
|
|
||||||
|
|
||||||
- name: Download ${{ matrix.stable-diffusion-model }}
|
|
||||||
id: download-stable-diffusion-model
|
|
||||||
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
|
|
||||||
run: |
|
|
||||||
mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
|
|
||||||
${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
|
|
||||||
|
|
||||||
- name: run configure_invokeai.py
|
|
||||||
id: run-preload-models
|
|
||||||
run: python3 scripts/configure_invokeai.py --skip-sd-weights --yes
|
|
||||||
|
|
||||||
- name: Run the tests
|
|
||||||
id: run-tests
|
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"
|
|
||||||
|
|
||||||
- name: Archive results
|
|
||||||
id: archive-results
|
|
||||||
if: matrix.os != 'windows-2022'
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
|
|
||||||
path: ${{ env.INVOKEAI_ROOT }}/outputs
|
|
||||||
21
.gitignore
vendored
@@ -6,7 +6,6 @@ models/ldm/stable-diffusion-v1/model.ckpt
|
|||||||
# ignore user models config
|
# ignore user models config
|
||||||
configs/models.user.yaml
|
configs/models.user.yaml
|
||||||
config/models.user.yml
|
config/models.user.yml
|
||||||
invokeai.init
|
|
||||||
|
|
||||||
# ignore the Anaconda/Miniconda installer used while building Docker image
|
# ignore the Anaconda/Miniconda installer used while building Docker image
|
||||||
anaconda.sh
|
anaconda.sh
|
||||||
@@ -195,6 +194,10 @@ checkpoints
|
|||||||
|
|
||||||
# Let the frontend manage its own gitignore
|
# Let the frontend manage its own gitignore
|
||||||
!frontend/*
|
!frontend/*
|
||||||
|
frontend/apt-get
|
||||||
|
frontend/dist
|
||||||
|
frontend/sudo
|
||||||
|
frontend/update
|
||||||
|
|
||||||
# Scratch folder
|
# Scratch folder
|
||||||
.scratch/
|
.scratch/
|
||||||
@@ -215,7 +218,7 @@ models/clipseg
|
|||||||
models/gfpgan
|
models/gfpgan
|
||||||
|
|
||||||
# ignore initfile
|
# ignore initfile
|
||||||
.invokeai
|
invokeai.init
|
||||||
|
|
||||||
# ignore environment.yml and requirements.txt
|
# ignore environment.yml and requirements.txt
|
||||||
# these are links to the real files in environments-and-requirements
|
# these are links to the real files in environments-and-requirements
|
||||||
@@ -223,14 +226,12 @@ environment.yml
|
|||||||
requirements.txt
|
requirements.txt
|
||||||
|
|
||||||
# source installer files
|
# source installer files
|
||||||
installer/*zip
|
source_installer/*zip
|
||||||
installer/install.bat
|
source_installer/invokeAI
|
||||||
installer/install.sh
|
install.bat
|
||||||
installer/update.bat
|
install.sh
|
||||||
installer/update.sh
|
update.bat
|
||||||
|
update.sh
|
||||||
|
|
||||||
# this may be present if the user created a venv
|
# this may be present if the user created a venv
|
||||||
invokeai
|
invokeai
|
||||||
|
|
||||||
# no longer stored in source directory
|
|
||||||
models
|
|
||||||
@@ -1,128 +0,0 @@
|
|||||||
# Contributor Covenant Code of Conduct
|
|
||||||
|
|
||||||
## Our Pledge
|
|
||||||
|
|
||||||
We as members, contributors, and leaders pledge to make participation in our
|
|
||||||
community a harassment-free experience for everyone, regardless of age, body
|
|
||||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
|
||||||
identity and expression, level of experience, education, socio-economic status,
|
|
||||||
nationality, personal appearance, race, religion, or sexual identity
|
|
||||||
and orientation.
|
|
||||||
|
|
||||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
|
||||||
diverse, inclusive, and healthy community.
|
|
||||||
|
|
||||||
## Our Standards
|
|
||||||
|
|
||||||
Examples of behavior that contributes to a positive environment for our
|
|
||||||
community include:
|
|
||||||
|
|
||||||
* Demonstrating empathy and kindness toward other people
|
|
||||||
* Being respectful of differing opinions, viewpoints, and experiences
|
|
||||||
* Giving and gracefully accepting constructive feedback
|
|
||||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
|
||||||
and learning from the experience
|
|
||||||
* Focusing on what is best not just for us as individuals, but for the
|
|
||||||
overall community
|
|
||||||
|
|
||||||
Examples of unacceptable behavior include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery, and sexual attention or
|
|
||||||
advances of any kind
|
|
||||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information, such as a physical or email
|
|
||||||
address, without their explicit permission
|
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
|
||||||
professional setting
|
|
||||||
|
|
||||||
## Enforcement Responsibilities
|
|
||||||
|
|
||||||
Community leaders are responsible for clarifying and enforcing our standards of
|
|
||||||
acceptable behavior and will take appropriate and fair corrective action in
|
|
||||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
|
||||||
or harmful.
|
|
||||||
|
|
||||||
Community leaders have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
|
||||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
|
||||||
decisions when appropriate.
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This Code of Conduct applies within all community spaces, and also applies when
|
|
||||||
an individual is officially representing the community in public spaces.
|
|
||||||
Examples of representing our community include using an official e-mail address,
|
|
||||||
posting via an official social media account, or acting as an appointed
|
|
||||||
representative at an online or offline event.
|
|
||||||
|
|
||||||
## Enforcement
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
|
||||||
may be reported to the community leaders responsible for enforcement
|
|
||||||
at https://github.com/invoke-ai/InvokeAI/issues. All complaints will
|
|
||||||
be reviewed and investigated promptly and fairly.
|
|
||||||
|
|
||||||
All community leaders are obligated to respect the privacy and security of the
|
|
||||||
reporter of any incident.
|
|
||||||
|
|
||||||
## Enforcement Guidelines
|
|
||||||
|
|
||||||
Community leaders will follow these Community Impact Guidelines in determining
|
|
||||||
the consequences for any action they deem in violation of this Code of Conduct:
|
|
||||||
|
|
||||||
### 1. Correction
|
|
||||||
|
|
||||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
|
||||||
unprofessional or unwelcome in the community.
|
|
||||||
|
|
||||||
**Consequence**: A private, written warning from community leaders, providing
|
|
||||||
clarity around the nature of the violation and an explanation of why the
|
|
||||||
behavior was inappropriate. A public apology may be requested.
|
|
||||||
|
|
||||||
### 2. Warning
|
|
||||||
|
|
||||||
**Community Impact**: A violation through a single incident or series
|
|
||||||
of actions.
|
|
||||||
|
|
||||||
**Consequence**: A warning with consequences for continued behavior. No
|
|
||||||
interaction with the people involved, including unsolicited interaction with
|
|
||||||
those enforcing the Code of Conduct, for a specified period of time. This
|
|
||||||
includes avoiding interactions in community spaces as well as external channels
|
|
||||||
like social media. Violating these terms may lead to a temporary or
|
|
||||||
permanent ban.
|
|
||||||
|
|
||||||
### 3. Temporary Ban
|
|
||||||
|
|
||||||
**Community Impact**: A serious violation of community standards, including
|
|
||||||
sustained inappropriate behavior.
|
|
||||||
|
|
||||||
**Consequence**: A temporary ban from any sort of interaction or public
|
|
||||||
communication with the community for a specified period of time. No public or
|
|
||||||
private interaction with the people involved, including unsolicited interaction
|
|
||||||
with those enforcing the Code of Conduct, is allowed during this period.
|
|
||||||
Violating these terms may lead to a permanent ban.
|
|
||||||
|
|
||||||
### 4. Permanent Ban
|
|
||||||
|
|
||||||
**Community Impact**: Demonstrating a pattern of violation of community
|
|
||||||
standards, including sustained inappropriate behavior, harassment of an
|
|
||||||
individual, or aggression toward or disparagement of classes of individuals.
|
|
||||||
|
|
||||||
**Consequence**: A permanent ban from any sort of public interaction within
|
|
||||||
the community.
|
|
||||||
|
|
||||||
## Attribution
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
|
||||||
version 2.0, available at
|
|
||||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
|
||||||
|
|
||||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
|
||||||
enforcement ladder](https://github.com/mozilla/diversity).
|
|
||||||
|
|
||||||
[homepage]: https://www.contributor-covenant.org
|
|
||||||
|
|
||||||
For answers to common questions about this code of conduct, see the FAQ at
|
|
||||||
https://www.contributor-covenant.org/faq. Translations are available at
|
|
||||||
https://www.contributor-covenant.org/translations.
|
|
||||||
@@ -1,84 +0,0 @@
|
|||||||
<img src="docs/assets/invoke_ai_banner.png" align="center">
|
|
||||||
|
|
||||||
Invoke-AI is a community of software developers, researchers, and user
|
|
||||||
interface experts who have come together on a voluntary basis to build
|
|
||||||
software tools which support cutting edge AI text-to-image
|
|
||||||
applications. This community is open to anyone who wishes to
|
|
||||||
contribute to the effort and has the skill and time to do so.
|
|
||||||
|
|
||||||
# Our Values
|
|
||||||
|
|
||||||
The InvokeAI team is a diverse community which includes individuals
|
|
||||||
from various parts of the world and many walks of life. Despite our
|
|
||||||
differences, we share a number of core values which we ask prospective
|
|
||||||
contributors to understand and respect. We believe:
|
|
||||||
|
|
||||||
1. That Open Source Software is a positive force in the world. We
|
|
||||||
create software that can be used, reused, and redistributed, without
|
|
||||||
restrictions, under a straightforward Open Source license (MIT). We
|
|
||||||
believe that Open Source benefits society as a whole by increasing the
|
|
||||||
availability of high quality software to all.
|
|
||||||
|
|
||||||
2. That those who create software should receive proper attribution
|
|
||||||
for their creative work. While we support the exchange and reuse of
|
|
||||||
Open Source Software, we feel strongly that the original authors of a
|
|
||||||
piece of code should receive credit for their contribution, and we
|
|
||||||
endeavor to do so whenever possible.
|
|
||||||
|
|
||||||
3. That there is moral ambiguity surrounding AI-assisted art. We are
|
|
||||||
aware of the moral and ethical issues surrounding the release of the
|
|
||||||
Stable Diffusion model and similar products. We are aware that, due to
|
|
||||||
the composition of their training sets, current AI-generated image
|
|
||||||
models are biased against certain ethnic groups, cultural concepts of
|
|
||||||
beauty, ethnic stereotypes, and gender roles.
|
|
||||||
|
|
||||||
1. We recognize the potential for harm to these groups that these biases
|
|
||||||
represent and trust that future AI models will take steps towards
|
|
||||||
reducing or eliminating the biases noted above, respect and give due
|
|
||||||
credit to the artists whose work is sourced, and call on developers
|
|
||||||
and users to favor these models over the older ones as they become
|
|
||||||
available.
|
|
||||||
|
|
||||||
4. We are deeply committed to ensuring that this technology benefits
|
|
||||||
everyone, including artists. We see AI art not as a replacement for
|
|
||||||
the artist, but rather as a tool to empower them. With that
|
|
||||||
in mind, we are constantly debating how to build systems that put
|
|
||||||
artists’ needs first: tools which can be readily integrated into an
|
|
||||||
artist’s existing workflows and practices, enhancing their work and
|
|
||||||
helping them to push it further. Every decision we take as a team,
|
|
||||||
which includes several artists, aims to build towards that goal.
|
|
||||||
|
|
||||||
5. That artificial intelligence can be a force for good in the world,
|
|
||||||
but must be used responsibly. Artificial intelligence technologies
|
|
||||||
have the potential to improve society, in everything from cancer care,
|
|
||||||
to customer service, to creative writing.
|
|
||||||
|
|
||||||
1. While we do not believe that software should arbitrarily limit what
|
|
||||||
users can do with it, we recognize that when used irresponsibly, AI
|
|
||||||
has the potential to do much harm. Our Discord server is actively
|
|
||||||
moderated in order to minimize the potential of harm from
|
|
||||||
user-contributed images. In addition, we ask users of our software to
|
|
||||||
refrain from using it in any way that would cause mental, emotional or
|
|
||||||
physical harm to individuals and vulnerable populations including (but
|
|
||||||
not limited to) women; minors; ethnic minorities; religious groups;
|
|
||||||
members of LGBTQIA communities; and people with disabilities or
|
|
||||||
impairments.
|
|
||||||
|
|
||||||
2. Note that some of the image generation AI models which the Invoke-AI
|
|
||||||
toolkit supports carry licensing agreements which impose restrictions
|
|
||||||
on how the model is used. We ask that our users read and agree to
|
|
||||||
these terms if they wish to make use of these models. These agreements
|
|
||||||
are distinct from the MIT license which applies to the InvokeAI
|
|
||||||
software and source code.
|
|
||||||
|
|
||||||
6. That mutual respect is key to a healthy software development
|
|
||||||
community. Members of the InvokeAI community are expected to treat
|
|
||||||
each other with respect, beneficence, and empathy. Each of us has a
|
|
||||||
different background and a unique set of skills. We strive to help
|
|
||||||
each other grow and gain new skills, and we apportion expectations in
|
|
||||||
a way that balances the members' time, skillset, and interest
|
|
||||||
area. Disputes are resolved by open and honest communication.
|
|
||||||
|
|
||||||
## Signature
|
|
||||||
|
|
||||||
This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, **keturn**, and **ebr** (Eugene Brodsky). Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
|
|
||||||
107
README.md
@@ -1,9 +1,11 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
# InvokeAI: A Stable Diffusion Toolkit
|
# InvokeAI: A Stable Diffusion Toolkit
|
||||||
|
|
||||||
|
_Formerly known as lstein/stable-diffusion_
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
|
|
||||||
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
||||||
@@ -36,33 +38,18 @@ This is a fork of
|
|||||||
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
|
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
|
||||||
the open source text-to-image generator. It provides a streamlined
|
the open source text-to-image generator. It provides a streamlined
|
||||||
process with various new features and options to aid the image
|
process with various new features and options to aid the image
|
||||||
generation process. It runs on Windows, macOS and Linux machines, with
|
generation process. It runs on Windows, Mac and Linux machines, with
|
||||||
GPU cards with as little as 4 GB of RAM. It provides both a polished
|
GPU cards with as little as 4 GB of RAM. It provides both a polished
|
||||||
Web interface (see below), and an easy-to-use command-line interface.
|
Web interface (see below), and an easy-to-use command-line interface.
|
||||||
|
|
||||||
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||||
|
|
||||||
_Note: InvokeAI is rapidly evolving. Please use the
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
|
||||||
requests. Be sure to use the provided templates. They will help us diagnose issues faster._
|
|
||||||
|
|
||||||
# Getting Started with InvokeAI
|
|
||||||
|
|
||||||
For full installation and upgrade instructions, please see:
|
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
|
|
||||||
|
|
||||||
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
|
||||||
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
|
||||||
3. Unzip the file.
|
|
||||||
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
|
|
||||||
5. Wait a while, until it is done.
|
|
||||||
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
|
|
||||||
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
|
|
||||||
8. Type `banana sushi` in the box on the top left and click `Invoke`:
|
|
||||||
|
|
||||||
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
|
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
|
||||||
|
|
||||||
|
|
||||||
|
_Note: This fork is rapidly evolving. Please use the
|
||||||
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
||||||
|
requests. Be sure to use the provided templates. They will help diagnose issues faster._
|
||||||
|
|
||||||
## Table of Contents
|
## Table of Contents
|
||||||
|
|
||||||
@@ -78,17 +65,17 @@ For full installation and upgrade instructions, please see:
|
|||||||
|
|
||||||
### Installation
|
### Installation
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
This fork is supported across multiple platforms. You can find individual installation instructions
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
below.
|
||||||
AMD card (using the ROCm driver). For full installation and upgrade
|
|
||||||
instructions, please see:
|
- #### [Linux](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_LINUX/)
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
|
||||||
|
- #### [Windows](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_WINDOWS/)
|
||||||
|
|
||||||
|
- #### [Macintosh](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_MAC/)
|
||||||
|
|
||||||
### Hardware Requirements
|
### Hardware Requirements
|
||||||
|
|
||||||
InvokeAI is supported across Linux, Windows and macOS. Linux
|
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
|
||||||
AMD card (using the ROCm driver).
|
|
||||||
#### System
|
#### System
|
||||||
|
|
||||||
You will need one of the following:
|
You will need one of the following:
|
||||||
@@ -96,10 +83,6 @@ You wil need one of the following:
|
|||||||
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||||
- An Apple computer with an M1 chip.
|
- An Apple computer with an M1 chip.
|
||||||
|
|
||||||
We do not recommend the GTX 1650 or 1660 series video cards. They are
|
|
||||||
unable to run in half-precision mode and do not have sufficient VRAM
|
|
||||||
to render 512x512 images.
|
|
||||||
|
|
||||||
#### Memory
|
#### Memory
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
@@ -117,12 +100,11 @@ Similarly, specify full-precision mode on Apple M1 hardware.
|
|||||||
|
|
||||||
Precision is auto configured based on the device. If however you encounter
|
Precision is auto configured based on the device. If however you encounter
|
||||||
errors like 'expected type Float but found Half' or 'not implemented for Half'
|
errors like 'expected type Float but found Half' or 'not implemented for Half'
|
||||||
you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command
|
you can try starting `invoke.py` with the `--precision=float32` flag:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
|
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
|
||||||
```
|
```
|
||||||
Or by updating your InvokeAI configuration file with this argument.
|
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
|
|
||||||
@@ -151,7 +133,39 @@ Or by updating your InvokeAI configuration file with this argument.
|
|||||||
|
|
||||||
### Latest Changes
|
### Latest Changes
|
||||||
|
|
||||||
For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
|
- v2.0.1 (13 October 2022)
|
||||||
|
- fix noisy images at high step count when using k* samplers
|
||||||
|
- dream.py script now calls invoke.py module directly rather than
|
||||||
|
via a new python process (which could break the environment)
|
||||||
|
|
||||||
|
- v2.0.0 (9 October 2022)
|
||||||
|
|
||||||
|
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
|
||||||
|
for backward compatibility.
|
||||||
|
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
||||||
|
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
|
||||||
|
- img2img runs on all k* samplers
|
||||||
|
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
|
||||||
|
- Support for CodeFormer face reconstruction
|
||||||
|
- Support for Textual Inversion on Macintoshes
|
||||||
|
- Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
|
||||||
|
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
|
||||||
|
and "embiggen" upscaling. See the `!fix` command.
|
||||||
|
- New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
|
||||||
|
- New `--perlin` and `--threshold` options allow you to add and control variation
|
||||||
|
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
|
||||||
|
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
|
||||||
|
and tweaking of previous settings.
|
||||||
|
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
|
||||||
|
- Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
|
||||||
|
New commands added:
|
||||||
|
- List command-line history with `!history`
|
||||||
|
- Search command-line history with `!search`
|
||||||
|
- Clear history with `!clear`
|
||||||
|
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||||
|
configure. To switch away from auto use the new flag like `--precision=float32`.
|
||||||
|
|
||||||
|
For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.
|
||||||
|
|
||||||
### Troubleshooting
|
### Troubleshooting
|
||||||
|
|
||||||
@@ -161,19 +175,14 @@ problems and other issues.
|
|||||||
# Contributing
|
# Contributing
|
||||||
|
|
||||||
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
||||||
cleanup, testing, or code reviews, is very much encouraged to do so.
|
cleanup, testing, or code reviews, is very much encouraged to do so. If you are unfamiliar with how
|
||||||
|
|
||||||
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
|
||||||
|
|
||||||
If you are unfamiliar with how
|
|
||||||
to contribute to GitHub projects, here is a
|
to contribute to GitHub projects, here is a
|
||||||
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
|
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
|
||||||
|
|
||||||
We hope you enjoy using our software as much as we enjoy creating it,
|
A full set of contribution guidelines, along with templates, are in progress, but for now the most
|
||||||
and we hope that some of those of you who are reading this will elect
|
important thing is to **make your pull request against the "development" branch**, and not against
|
||||||
to become part of our community.
|
"main". This will help keep public breakage to a minimum and will allow you to propose more radical
|
||||||
|
changes.
|
||||||
Welcome to InvokeAI!
|
|
||||||
|
|
||||||
### Contributors
|
### Contributors
|
||||||
|
|
||||||
@@ -186,7 +195,7 @@ their time, hard work and effort.
|
|||||||
For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
|
For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
|
||||||
email if you use and like the script.
|
email if you use and like the script.
|
||||||
|
|
||||||
Original portions of the software are Copyright (c) 2022
|
Original portions of the software are Copyright (c) 2020
|
||||||
[Lincoln D. Stein](https://github.com/lstein)
|
[Lincoln D. Stein](https://github.com/lstein)
|
||||||
|
|
||||||
### Further Reading
|
### Further Reading
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ This model card focuses on the model associated with the Stable Diffusion model,
|
|||||||
|
|
||||||
# Uses
|
# Uses
|
||||||
|
|
||||||
## Direct Use
|
## Direct Use
|
||||||
The model is intended for research purposes only. Possible research areas and
|
The model is intended for research purposes only. Possible research areas and
|
||||||
tasks include
|
tasks include
|
||||||
|
|
||||||
@@ -68,11 +68,11 @@ Using the model to generate content that is cruel to individuals is a misuse of
|
|||||||
considerations.
|
considerations.
|
||||||
|
|
||||||
### Bias
|
### Bias
|
||||||
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
|
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
|
||||||
Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
|
Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
|
||||||
which consists of images that are primarily limited to English descriptions.
|
which consists of images that are primarily limited to English descriptions.
|
||||||
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
|
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
|
||||||
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
|
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
|
||||||
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
|
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
|
||||||
|
|
||||||
|
|
||||||
@@ -84,7 +84,7 @@ The model developers used the following dataset for training the model:
|
|||||||
- LAION-2B (en) and subsets thereof (see next section)
|
- LAION-2B (en) and subsets thereof (see next section)
|
||||||
|
|
||||||
**Training Procedure**
|
**Training Procedure**
|
||||||
Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,
|
Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,
|
||||||
|
|
||||||
- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
|
- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
|
||||||
- Text prompts are encoded through a ViT-L/14 text-encoder.
|
- Text prompts are encoded through a ViT-L/14 text-encoder.
|
||||||
@@ -108,12 +108,12 @@ filtered to images with an original size `>= 512x512`, estimated aesthetics scor
|
|||||||
- **Batch:** 32 x 8 x 2 x 4 = 2048
|
- **Batch:** 32 x 8 x 2 x 4 = 2048
|
||||||
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant
|
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant
|
||||||
|
|
||||||
## Evaluation Results
|
## Evaluation Results
|
||||||
Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
|
Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
|
||||||
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
|
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
|
||||||
steps show the relative improvements of the checkpoints:
|
steps show the relative improvements of the checkpoints:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.
|
Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.
|
||||||
## Environmental Impact
|
## Environmental Impact
|
||||||
|
|||||||
@@ -1,117 +0,0 @@
|
|||||||
from PIL import Image, ImageChops
|
|
||||||
from PIL.Image import Image as ImageType
|
|
||||||
from typing import Union, Literal
|
|
||||||
|
|
||||||
# https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent
def check_for_any_transparency(img: Union[ImageType, str]) -> bool:
    """Return True if *img* contains any transparency.

    Args:
        img: A PIL image, or a filesystem path to an image file.

    Returns:
        True when the image has an explicit "transparency" info entry,
        a palette entry marked transparent (mode "P"), or any alpha
        value below 255 (mode "RGBA"); False otherwise.
    """
    if type(img) is str:
        # BUG FIX: the original called Image.open(str), passing the builtin
        # `str` type instead of the path argument, which raised for every
        # string input.
        img = Image.open(img)

    # An explicit "transparency" entry in the image metadata means the
    # image declares transparency regardless of mode.
    if img.info.get("transparency", None) is not None:
        return True
    if img.mode == "P":
        # Palette mode: transparent if any color actually used in the image
        # maps to the palette's transparency index.
        transparent = img.info.get("transparency", -1)
        for _, index in img.getcolors():
            if index == transparent:
                return True
    elif img.mode == "RGBA":
        # RGBA mode: transparent if the alpha channel's minimum (band 3 of
        # getextrema) is below fully opaque.
        extrema = img.getextrema()
        if extrema[3][0] < 255:
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
def get_canvas_generation_mode(
    init_img: Union[ImageType, str], init_mask: Union[ImageType, str]
) -> Literal["txt2img", "outpainting", "inpainting", "img2img",]:
    """Decide which generation mode the canvas inputs call for.

    Args:
        init_img: Initial canvas image, or a path to one.
        init_mask: Mask image, or a path to one. White areas are left
            untouched; black areas mark where changes should be made.

    Returns:
        "txt2img" when the init image is fully transparent, "outpainting"
        when it is partially transparent, "inpainting" when it is opaque
        but a mask region exists, and "img2img" otherwise.
    """
    # Accept file paths as well as in-memory PIL images.
    if type(init_img) is str:
        init_img = Image.open(init_img)

    if type(init_mask) is str:
        init_mask = Image.open(init_mask)

    init_img = init_img.convert("RGBA")

    # Greyscale view of the init image's alpha channel.
    alpha_channel = init_img.split()[-1]
    alpha_mask = alpha_channel.convert("L")
    has_transparency = check_for_any_transparency(init_img)

    fully_transparent = False
    if has_transparency:
        # getbbox() returns None only when every alpha value is zero,
        # i.e. the image is completely transparent.
        fully_transparent = alpha_mask.getbbox() is None

    # Fit the mask to the init image's size and reduce it to greyscale.
    # Mask convention: white = keep, black = change.
    init_mask = init_mask.resize(init_img.size).convert("L")

    # PIL.Image.getbbox() bounds the non-zero region, so invert first:
    # masked (black) areas become non-zero, and a non-None bbox tells us
    # that at least one masked area exists.
    mask_exists = ImageChops.invert(init_mask).getbbox() is not None

    if has_transparency:
        return "txt2img" if fully_transparent else "outpainting"
    return "inpainting" if mask_exists else "img2img"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Smoke-test get_canvas_generation_mode() against sample test images."""
    img_opaque = "test_images/init-img_opaque.png"
    img_partial = "test_images/init-img_partial_transparency.png"
    img_full = "test_images/init-img_full_transparency.png"
    mask_absent = "test_images/init-mask_no_mask.png"
    mask_present = "test_images/init-mask_has_mask.png"

    # (label, init image, mask) — labels state the expected mode.
    cases = [
        ("OPAQUE IMAGE, NO MASK, expect img2img, got ", img_opaque, mask_absent),
        ("IMAGE WITH TRANSPARENCY, NO MASK, expect outpainting, got ", img_partial, mask_absent),
        ("FULLY TRANSPARENT IMAGE NO MASK, expect txt2img, got ", img_full, mask_absent),
        ("OPAQUE IMAGE, WITH MASK, expect inpainting, got ", img_opaque, mask_present),
        ("IMAGE WITH TRANSPARENCY, WITH MASK, expect outpainting, got ", img_partial, mask_present),
        ("FULLY TRANSPARENT IMAGE WITH MASK, expect txt2img, got ", img_full, mask_present),
    ]

    for label, img, mask in cases:
        print(label, get_canvas_generation_mode(img, mask))


if __name__ == "__main__":
    main()
|
|
||||||
@@ -5,8 +5,6 @@ SAMPLER_CHOICES = [
|
|||||||
"ddim",
|
"ddim",
|
||||||
"k_dpm_2_a",
|
"k_dpm_2_a",
|
||||||
"k_dpm_2",
|
"k_dpm_2",
|
||||||
"k_dpmpp_2_a",
|
|
||||||
"k_dpmpp_2",
|
|
||||||
"k_euler_a",
|
"k_euler_a",
|
||||||
"k_euler",
|
"k_euler",
|
||||||
"k_heun",
|
"k_heun",
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 2.7 KiB |
|
Before Width: | Height: | Size: 292 KiB |
|
Before Width: | Height: | Size: 164 KiB |
|
Before Width: | Height: | Size: 9.5 KiB |
|
Before Width: | Height: | Size: 3.4 KiB |
@@ -1,36 +0,0 @@
|
|||||||
@echo off

rem Launcher menu: run the InvokeAI CLI, the web UI, or open a developer shell.
rem Runs from the script's own directory with the bundled venv activated.
PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    rem Forward all launcher arguments (%*) to the CLI.
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
    rem Developer console: report the active interpreter, then drop into an
    rem interactive cmd session that keeps the venv activated (cmd /k).
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) else (
    echo Invalid selection
    pause
    exit /b
)

deactivate
|
|
||||||
@@ -1,46 +0,0 @@
|
|||||||
#!/usr/bin/env sh

# Launcher menu: run the InvokeAI CLI, the web UI, or open a developer shell,
# with the bundled virtual environment activated.
set -eu

. .venv/bin/activate

# set required env var for torch on mac MPS
# BUG FIX: POSIX `test` uses `=` for string comparison; the original `==` is
# a bashism that fails under strict /bin/sh implementations (e.g. dash).
if [ "$(uname -s)" = "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
# -r keeps backslashes in the reply from being treated as escapes.
read -r choice

case $choice in
    1)
        # "$@" (not unquoted $*) preserves arguments containing spaces.
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py "$@";
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web "$@";
        ;;
    3)
        # Developer console: report the active interpreter, then hand off to
        # the user's own shell with the venv still activated.
        printf "\nDeveloper Console:\n";
        printf "Python command is:\n\t";
        which python;
        printf "Python version is:\n\t";
        python --version;
        echo "*************************"
        echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
        echo "so that you can troubleshoot this InvokeAI installation as necessary.";
        printf "*************************\n"
        echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
        /usr/bin/env "$SHELL";
        ;;
    *)
        echo "Invalid selection";
        exit
        ;;
esac
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
InvokeAI
|
|
||||||
|
|
||||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
|
||||||
|
|
||||||
Installation on Windows:
|
|
||||||
NOTE: You might need to enable Windows Long Paths. If you're not sure,
|
|
||||||
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
|
|
||||||
file. Note that you will need to have admin privileges in order to
|
|
||||||
do this.
|
|
||||||
|
|
||||||
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
|
|
||||||
|
|
||||||
Installation on Linux and Mac:
|
|
||||||
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
|
|
||||||
|
|
||||||
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
|
|
||||||
file (on Linux/Mac) to start InvokeAI.
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
--prefer-binary
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/cu116
|
|
||||||
--trusted-host https://download.pytorch.org
|
|
||||||
accelerate~=0.14
|
|
||||||
albumentations
|
|
||||||
diffusers
|
|
||||||
eventlet
|
|
||||||
flask_cors
|
|
||||||
flask_socketio
|
|
||||||
flaskwebgui==1.0.3
|
|
||||||
getpass_asterisk
|
|
||||||
imageio-ffmpeg
|
|
||||||
pyreadline3
|
|
||||||
realesrgan
|
|
||||||
send2trash
|
|
||||||
streamlit
|
|
||||||
taming-transformers-rom1504
|
|
||||||
test-tube
|
|
||||||
torch-fidelity
|
|
||||||
torch==1.12.1 ; platform_system == 'Darwin'
|
|
||||||
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
|
||||||
torchvision==0.13.1 ; platform_system == 'Darwin'
|
|
||||||
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
|
||||||
transformers
|
|
||||||
picklescan
|
|
||||||
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
|
|
||||||
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
|
|
||||||
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
|
|
||||||
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
|
|
||||||
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
|
|
||||||
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
|
|
||||||
@@ -1,80 +0,0 @@
|
|||||||
stable-diffusion-1.5:
|
|
||||||
description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
|
|
||||||
repo_id: runwayml/stable-diffusion-v1-5
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: v1-5-pruned-emaonly.ckpt
|
|
||||||
recommended: true
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
inpainting-1.5:
|
|
||||||
description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
|
|
||||||
repo_id: runwayml/stable-diffusion-inpainting
|
|
||||||
config: v1-inpainting-inference.yaml
|
|
||||||
file: sd-v1-5-inpainting.ckpt
|
|
||||||
recommended: True
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
ft-mse-improved-autoencoder-840000:
|
|
||||||
description: StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
|
|
||||||
repo_id: stabilityai/sd-vae-ft-mse-original
|
|
||||||
config: VAE/default
|
|
||||||
file: vae-ft-mse-840000-ema-pruned.ckpt
|
|
||||||
recommended: True
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
stable-diffusion-1.4:
|
|
||||||
description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
|
|
||||||
repo_id: CompVis/stable-diffusion-v-1-4-original
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: sd-v1-4.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
waifu-diffusion-1.3:
|
|
||||||
description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
|
|
||||||
repo_id: hakurei/waifu-diffusion-v1-3
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: model-epoch09-float32.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
trinart-2.0:
|
|
||||||
description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB)
|
|
||||||
repo_id: naclbit/trinart_stable_diffusion_v2
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: trinart2_step95000.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
trinart_characters-1.0:
|
|
||||||
description: An SD model finetuned with 19.2M anime/manga style images (2.13 GB)
|
|
||||||
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: trinart_characters_it4_v1.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
trinart_vae:
|
|
||||||
description: Custom autoencoder for trinart_characters
|
|
||||||
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
|
|
||||||
config: VAE/trinart
|
|
||||||
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
papercut-1.0:
|
|
||||||
description: SD 1.5 fine-tuned for papercut art (use "PaperCut" in your prompts) (2.13 GB)
|
|
||||||
repo_id: Fictiverse/Stable_Diffusion_PaperCut_Model
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: PaperCut_v1.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
voxel_art-1.0:
|
|
||||||
description: Stable Diffusion trained on voxel art (use "VoxelArt" in your prompts) (4.27 GB)
|
|
||||||
repo_id: Fictiverse/Stable_Diffusion_VoxelArt_Model
|
|
||||||
config: v1-inference.yaml
|
|
||||||
file: VoxelArt_v1.ckpt
|
|
||||||
recommended: False
|
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
@@ -7,8 +7,8 @@
|
|||||||
# was trained on.
|
# was trained on.
|
||||||
stable-diffusion-1.5:
|
stable-diffusion-1.5:
|
||||||
description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
|
description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
|
||||||
weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
|
weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
|
||||||
config: configs/stable-diffusion/v1-inference.yaml
|
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||||
width: 512
|
width: 512
|
||||||
height: 512
|
height: 512
|
||||||
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||||
@@ -25,5 +25,3 @@ inpainting-1.5:
|
|||||||
config: configs/stable-diffusion/v1-inpainting-inference.yaml
|
config: configs/stable-diffusion/v1-inpainting-inference.yaml
|
||||||
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||||
description: RunwayML SD 1.5 model optimized for inpainting
|
description: RunwayML SD 1.5 model optimized for inpainting
|
||||||
width: 512
|
|
||||||
height: 512
|
|
||||||
|
|||||||
@@ -1,803 +0,0 @@
|
|||||||
sd-concepts-library/001glitch-core
|
|
||||||
sd-concepts-library/2814-roth
|
|
||||||
sd-concepts-library/3d-female-cyborgs
|
|
||||||
sd-concepts-library/4tnght
|
|
||||||
sd-concepts-library/80s-anime-ai
|
|
||||||
sd-concepts-library/80s-anime-ai-being
|
|
||||||
sd-concepts-library/852style-girl
|
|
||||||
sd-concepts-library/8bit
|
|
||||||
sd-concepts-library/8sconception
|
|
||||||
sd-concepts-library/Aflac-duck
|
|
||||||
sd-concepts-library/Akitsuki
|
|
||||||
sd-concepts-library/Atako
|
|
||||||
sd-concepts-library/Exodus-Styling
|
|
||||||
sd-concepts-library/RINGAO
|
|
||||||
sd-concepts-library/a-female-hero-from-the-legend-of-mir
|
|
||||||
sd-concepts-library/a-hat-kid
|
|
||||||
sd-concepts-library/a-tale-of-two-empires
|
|
||||||
sd-concepts-library/aadhav-face
|
|
||||||
sd-concepts-library/aavegotchi
|
|
||||||
sd-concepts-library/abby-face
|
|
||||||
sd-concepts-library/abstract-concepts
|
|
||||||
sd-concepts-library/accurate-angel
|
|
||||||
sd-concepts-library/agm-style-nao
|
|
||||||
sd-concepts-library/aj-fosik
|
|
||||||
sd-concepts-library/alberto-mielgo
|
|
||||||
sd-concepts-library/alex-portugal
|
|
||||||
sd-concepts-library/alex-thumbnail-object-2000-steps
|
|
||||||
sd-concepts-library/aleyna-tilki
|
|
||||||
sd-concepts-library/alf
|
|
||||||
sd-concepts-library/alicebeta
|
|
||||||
sd-concepts-library/alien-avatar
|
|
||||||
sd-concepts-library/alisa
|
|
||||||
sd-concepts-library/all-rings-albuns
|
|
||||||
sd-concepts-library/altvent
|
|
||||||
sd-concepts-library/altyn-helmet
|
|
||||||
sd-concepts-library/amine
|
|
||||||
sd-concepts-library/amogus
|
|
||||||
sd-concepts-library/anders-zorn
|
|
||||||
sd-concepts-library/angus-mcbride-style
|
|
||||||
sd-concepts-library/animalve3-1500seq
|
|
||||||
sd-concepts-library/anime-background-style
|
|
||||||
sd-concepts-library/anime-background-style-v2
|
|
||||||
sd-concepts-library/anime-boy
|
|
||||||
sd-concepts-library/anime-girl
|
|
||||||
sd-concepts-library/anyXtronXredshift
|
|
||||||
sd-concepts-library/anya-forger
|
|
||||||
sd-concepts-library/apex-wingman
|
|
||||||
sd-concepts-library/apulian-rooster-v0-1
|
|
||||||
sd-concepts-library/arcane-face
|
|
||||||
sd-concepts-library/arcane-style-jv
|
|
||||||
sd-concepts-library/arcimboldo-style
|
|
||||||
sd-concepts-library/armando-reveron-style
|
|
||||||
sd-concepts-library/armor-concept
|
|
||||||
sd-concepts-library/arq-render
|
|
||||||
sd-concepts-library/art-brut
|
|
||||||
sd-concepts-library/arthur1
|
|
||||||
sd-concepts-library/artist-yukiko-kanagai
|
|
||||||
sd-concepts-library/arwijn
|
|
||||||
sd-concepts-library/ashiok
|
|
||||||
sd-concepts-library/at-wolf-boy-object
|
|
||||||
sd-concepts-library/atm-ant
|
|
||||||
sd-concepts-library/atm-ant-2
|
|
||||||
sd-concepts-library/axe-tattoo
|
|
||||||
sd-concepts-library/ayush-spider-spr
|
|
||||||
sd-concepts-library/azura-from-vibrant-venture
|
|
||||||
sd-concepts-library/ba-shiroko
|
|
||||||
sd-concepts-library/babau
|
|
||||||
sd-concepts-library/babs-bunny
|
|
||||||
sd-concepts-library/babushork
|
|
||||||
sd-concepts-library/backrooms
|
|
||||||
sd-concepts-library/bad_Hub_Hugh
|
|
||||||
sd-concepts-library/bada-club
|
|
||||||
sd-concepts-library/baldi
|
|
||||||
sd-concepts-library/baluchitherian
|
|
||||||
sd-concepts-library/bamse
|
|
||||||
sd-concepts-library/bamse-og-kylling
|
|
||||||
sd-concepts-library/bee
|
|
||||||
sd-concepts-library/beholder
|
|
||||||
sd-concepts-library/beldam
|
|
||||||
sd-concepts-library/belen
|
|
||||||
sd-concepts-library/bella-goth
|
|
||||||
sd-concepts-library/belle-delphine
|
|
||||||
sd-concepts-library/bert-muppet
|
|
||||||
sd-concepts-library/better-collage3
|
|
||||||
sd-concepts-library/between2-mt-fade
|
|
||||||
sd-concepts-library/birb-style
|
|
||||||
sd-concepts-library/black-and-white-design
|
|
||||||
sd-concepts-library/black-waifu
|
|
||||||
sd-concepts-library/bloo
|
|
||||||
sd-concepts-library/blue-haired-boy
|
|
||||||
sd-concepts-library/blue-zombie
|
|
||||||
sd-concepts-library/blue-zombiee
|
|
||||||
sd-concepts-library/bluebey
|
|
||||||
sd-concepts-library/bluebey-2
|
|
||||||
sd-concepts-library/bobs-burgers
|
|
||||||
sd-concepts-library/boissonnard
|
|
||||||
sd-concepts-library/bonzi-monkey
|
|
||||||
sd-concepts-library/borderlands
|
|
||||||
sd-concepts-library/bored-ape-textual-inversion
|
|
||||||
sd-concepts-library/boris-anderson
|
|
||||||
sd-concepts-library/bozo-22
|
|
||||||
sd-concepts-library/breakcore
|
|
||||||
sd-concepts-library/brittney-williams-art
|
|
||||||
sd-concepts-library/bruma
|
|
||||||
sd-concepts-library/brunnya
|
|
||||||
sd-concepts-library/buddha-statue
|
|
||||||
sd-concepts-library/bullvbear
|
|
||||||
sd-concepts-library/button-eyes
|
|
||||||
sd-concepts-library/canadian-goose
|
|
||||||
sd-concepts-library/canary-cap
|
|
||||||
sd-concepts-library/cancer_style
|
|
||||||
sd-concepts-library/captain-haddock
|
|
||||||
sd-concepts-library/captainkirb
|
|
||||||
sd-concepts-library/car-toy-rk
|
|
||||||
sd-concepts-library/carasibana
|
|
||||||
sd-concepts-library/carlitos-el-mago
|
|
||||||
sd-concepts-library/carrascharacter
|
|
||||||
sd-concepts-library/cartoona-animals
|
|
||||||
sd-concepts-library/cat-toy
|
|
||||||
sd-concepts-library/centaur
|
|
||||||
sd-concepts-library/cgdonny1
|
|
||||||
sd-concepts-library/cham
|
|
||||||
sd-concepts-library/chandra-nalaar
|
|
||||||
sd-concepts-library/char-con
|
|
||||||
sd-concepts-library/character-pingu
|
|
||||||
sd-concepts-library/cheburashka
|
|
||||||
sd-concepts-library/chen-1
|
|
||||||
sd-concepts-library/child-zombie
|
|
||||||
sd-concepts-library/chillpill
|
|
||||||
sd-concepts-library/chonkfrog
|
|
||||||
sd-concepts-library/chop
|
|
||||||
sd-concepts-library/christo-person
|
|
||||||
sd-concepts-library/chuck-walton
|
|
||||||
sd-concepts-library/chucky
|
|
||||||
sd-concepts-library/chungus-poodl-pet
|
|
||||||
sd-concepts-library/cindlop
|
|
||||||
sd-concepts-library/collage-cutouts
|
|
||||||
sd-concepts-library/collage14
|
|
||||||
sd-concepts-library/collage3
|
|
||||||
sd-concepts-library/collage3-hubcity
|
|
||||||
sd-concepts-library/cologne
|
|
||||||
sd-concepts-library/color-page
|
|
||||||
sd-concepts-library/colossus
|
|
||||||
sd-concepts-library/command-and-conquer-remastered-cameos
|
|
||||||
sd-concepts-library/concept-art
|
|
||||||
sd-concepts-library/conner-fawcett-style
|
|
||||||
sd-concepts-library/conway-pirate
|
|
||||||
sd-concepts-library/coop-himmelblau
|
|
||||||
sd-concepts-library/coraline
|
|
||||||
sd-concepts-library/cornell-box
|
|
||||||
sd-concepts-library/cortana
|
|
||||||
sd-concepts-library/covid-19-rapid-test
|
|
||||||
sd-concepts-library/cow-uwu
|
|
||||||
sd-concepts-library/cowboy
|
|
||||||
sd-concepts-library/crazy-1
|
|
||||||
sd-concepts-library/crazy-2
|
|
||||||
sd-concepts-library/crb-portraits
|
|
||||||
sd-concepts-library/crb-surrealz
|
|
||||||
sd-concepts-library/crbart
|
|
||||||
sd-concepts-library/crested-gecko
|
|
||||||
sd-concepts-library/crinos-form-garou
|
|
||||||
sd-concepts-library/cry-baby-style
|
|
||||||
sd-concepts-library/crybaby-style-2-0
|
|
||||||
sd-concepts-library/csgo-awp-object
|
|
||||||
sd-concepts-library/csgo-awp-texture-map
|
|
||||||
sd-concepts-library/cubex
|
|
||||||
sd-concepts-library/cumbia-peruana
|
|
||||||
sd-concepts-library/cute-bear
|
|
||||||
sd-concepts-library/cute-cat
|
|
||||||
sd-concepts-library/cute-game-style
|
|
||||||
sd-concepts-library/cyberpunk-lucy
|
|
||||||
sd-concepts-library/dabotap
|
|
||||||
sd-concepts-library/dan-mumford
|
|
||||||
sd-concepts-library/dan-seagrave-art-style
|
|
||||||
sd-concepts-library/dark-penguin-pinguinanimations
|
|
||||||
sd-concepts-library/darkpenguinanimatronic
|
|
||||||
sd-concepts-library/darkplane
|
|
||||||
sd-concepts-library/david-firth-artstyle
|
|
||||||
sd-concepts-library/david-martinez-cyberpunk
|
|
||||||
sd-concepts-library/david-martinez-edgerunners
|
|
||||||
sd-concepts-library/david-moreno-architecture
|
|
||||||
sd-concepts-library/daycare-attendant-sun-fnaf
|
|
||||||
sd-concepts-library/ddattender
|
|
||||||
sd-concepts-library/degods
|
|
||||||
sd-concepts-library/degodsheavy
|
|
||||||
sd-concepts-library/depthmap
|
|
||||||
sd-concepts-library/depthmap-style
|
|
||||||
sd-concepts-library/design
|
|
||||||
sd-concepts-library/detectivedinosaur1
|
|
||||||
sd-concepts-library/diaosu-toy
|
|
||||||
sd-concepts-library/dicoo
|
|
||||||
sd-concepts-library/dicoo2
|
|
||||||
sd-concepts-library/dishonored-portrait-styles
|
|
||||||
sd-concepts-library/disquieting-muses
|
|
||||||
sd-concepts-library/ditko
|
|
||||||
sd-concepts-library/dlooak
|
|
||||||
sd-concepts-library/doc
|
|
||||||
sd-concepts-library/doener-red-line-art
|
|
||||||
sd-concepts-library/dog
|
|
||||||
sd-concepts-library/dog-django
|
|
||||||
sd-concepts-library/doge-pound
|
|
||||||
sd-concepts-library/dong-ho
|
|
||||||
sd-concepts-library/dong-ho2
|
|
||||||
sd-concepts-library/doose-s-realistic-art-style
|
|
||||||
sd-concepts-library/dq10-anrushia
|
|
||||||
sd-concepts-library/dr-livesey
|
|
||||||
sd-concepts-library/dr-strange
|
|
||||||
sd-concepts-library/dragonborn
|
|
||||||
sd-concepts-library/dreamcore
|
|
||||||
sd-concepts-library/dreamy-painting
|
|
||||||
sd-concepts-library/drive-scorpion-jacket
|
|
||||||
sd-concepts-library/dsmuses
|
|
||||||
sd-concepts-library/dtv-pkmn
|
|
||||||
sd-concepts-library/dullboy-caricature
|
|
||||||
sd-concepts-library/duranduran
|
|
||||||
sd-concepts-library/durer-style
|
|
||||||
sd-concepts-library/dyoudim-style
|
|
||||||
sd-concepts-library/early-mishima-kurone
|
|
||||||
sd-concepts-library/eastward
|
|
||||||
sd-concepts-library/eddie
|
|
||||||
sd-concepts-library/edgerunners-style
|
|
||||||
sd-concepts-library/edgerunners-style-v2
|
|
||||||
sd-concepts-library/el-salvador-style-style
|
|
||||||
sd-concepts-library/elegant-flower
|
|
||||||
sd-concepts-library/elspeth-tirel
|
|
||||||
sd-concepts-library/eru-chitanda-casual
|
|
||||||
sd-concepts-library/erwin-olaf-style
|
|
||||||
sd-concepts-library/ettblackteapot
|
|
||||||
sd-concepts-library/explosions-cat
|
|
||||||
sd-concepts-library/eye-of-agamotto
|
|
||||||
sd-concepts-library/f-22
|
|
||||||
sd-concepts-library/facadeplace
|
|
||||||
sd-concepts-library/fairy-tale-painting-style
|
|
||||||
sd-concepts-library/fairytale
|
|
||||||
sd-concepts-library/fang-yuan-001
|
|
||||||
sd-concepts-library/faraon-love-shady
|
|
||||||
sd-concepts-library/fasina
|
|
||||||
sd-concepts-library/felps
|
|
||||||
sd-concepts-library/female-kpop-singer
|
|
||||||
sd-concepts-library/fergal-cat
|
|
||||||
sd-concepts-library/filename-2
|
|
||||||
sd-concepts-library/fileteado-porteno
|
|
||||||
sd-concepts-library/final-fantasy-logo
|
|
||||||
sd-concepts-library/fireworks-over-water
|
|
||||||
sd-concepts-library/fish
|
|
||||||
sd-concepts-library/flag-ussr
|
|
||||||
sd-concepts-library/flatic
|
|
||||||
sd-concepts-library/floral
|
|
||||||
sd-concepts-library/fluid-acrylic-jellyfish-creatures-style-of-carl-ingram-art
|
|
||||||
sd-concepts-library/fnf-boyfriend
|
|
||||||
sd-concepts-library/fold-structure
|
|
||||||
sd-concepts-library/fox-purple
|
|
||||||
sd-concepts-library/fractal
|
|
||||||
sd-concepts-library/fractal-flame
|
|
||||||
sd-concepts-library/fractal-temple-style
|
|
||||||
sd-concepts-library/frank-frazetta
|
|
||||||
sd-concepts-library/franz-unterberger
|
|
||||||
sd-concepts-library/freddy-fazbear
|
|
||||||
sd-concepts-library/freefonix-style
|
|
||||||
sd-concepts-library/furrpopasthetic
|
|
||||||
sd-concepts-library/fursona
|
|
||||||
sd-concepts-library/fzk
|
|
||||||
sd-concepts-library/galaxy-explorer
|
|
||||||
sd-concepts-library/ganyu-genshin-impact
|
|
||||||
sd-concepts-library/garcon-the-cat
|
|
||||||
sd-concepts-library/garfield-pizza-plush
|
|
||||||
sd-concepts-library/garfield-pizza-plush-v2
|
|
||||||
sd-concepts-library/gba-fe-class-cards
|
|
||||||
sd-concepts-library/gba-pokemon-sprites
|
|
||||||
sd-concepts-library/geggin
|
|
||||||
sd-concepts-library/ggplot2
|
|
||||||
sd-concepts-library/ghost-style
|
|
||||||
sd-concepts-library/ghostproject-men
|
|
||||||
sd-concepts-library/gibasachan-v0
|
|
||||||
sd-concepts-library/gim
|
|
||||||
sd-concepts-library/gio
|
|
||||||
sd-concepts-library/giygas
|
|
||||||
sd-concepts-library/glass-pipe
|
|
||||||
sd-concepts-library/glass-prism-cube
|
|
||||||
sd-concepts-library/glow-forest
|
|
||||||
sd-concepts-library/goku
|
|
||||||
sd-concepts-library/gram-tops
|
|
||||||
sd-concepts-library/green-blue-shanshui
|
|
||||||
sd-concepts-library/green-tent
|
|
||||||
sd-concepts-library/grifter
|
|
||||||
sd-concepts-library/grisstyle
|
|
||||||
sd-concepts-library/grit-toy
|
|
||||||
sd-concepts-library/gt-color-paint-2
|
|
||||||
sd-concepts-library/gta5-artwork
|
|
||||||
sd-concepts-library/guttestreker
|
|
||||||
sd-concepts-library/gymnastics-leotard-v2
|
|
||||||
sd-concepts-library/half-life-2-dog
|
|
||||||
sd-concepts-library/handstand
|
|
||||||
sd-concepts-library/hanfu-anime-style
|
|
||||||
sd-concepts-library/happy-chaos
|
|
||||||
sd-concepts-library/happy-person12345
|
|
||||||
sd-concepts-library/happy-person12345-assets
|
|
||||||
sd-concepts-library/harley-quinn
|
|
||||||
sd-concepts-library/harmless-ai-1
|
|
||||||
sd-concepts-library/harmless-ai-house-style-1
|
|
||||||
sd-concepts-library/hd-emoji
|
|
||||||
sd-concepts-library/heather
|
|
||||||
sd-concepts-library/henjo-techno-show
|
|
||||||
sd-concepts-library/herge-style
|
|
||||||
sd-concepts-library/hiten-style-nao
|
|
||||||
sd-concepts-library/hitokomoru-style-nao
|
|
||||||
sd-concepts-library/hiyuki-chan
|
|
||||||
sd-concepts-library/hk-bamboo
|
|
||||||
sd-concepts-library/hk-betweenislands
|
|
||||||
sd-concepts-library/hk-bicycle
|
|
||||||
sd-concepts-library/hk-blackandwhite
|
|
||||||
sd-concepts-library/hk-breakfast
|
|
||||||
sd-concepts-library/hk-buses
|
|
||||||
sd-concepts-library/hk-clouds
|
|
||||||
sd-concepts-library/hk-goldbuddha
|
|
||||||
sd-concepts-library/hk-goldenlantern
|
|
||||||
sd-concepts-library/hk-hkisland
|
|
||||||
sd-concepts-library/hk-leaves
|
|
||||||
sd-concepts-library/hk-market
|
|
||||||
sd-concepts-library/hk-oldcamera
|
|
||||||
sd-concepts-library/hk-opencamera
|
|
||||||
sd-concepts-library/hk-peach
|
|
||||||
sd-concepts-library/hk-phonevax
|
|
||||||
sd-concepts-library/hk-streetpeople
|
|
||||||
sd-concepts-library/hk-vintage
|
|
||||||
sd-concepts-library/hoi4
|
|
||||||
sd-concepts-library/hoi4-leaders
|
|
||||||
sd-concepts-library/homestuck-sprite
|
|
||||||
sd-concepts-library/homestuck-troll
|
|
||||||
sd-concepts-library/hours-sentry-fade
|
|
||||||
sd-concepts-library/hours-style
|
|
||||||
sd-concepts-library/hrgiger-drmacabre
|
|
||||||
sd-concepts-library/huang-guang-jian
|
|
||||||
sd-concepts-library/huatli
|
|
||||||
sd-concepts-library/huayecai820-greyscale
|
|
||||||
sd-concepts-library/hub-city
|
|
||||||
sd-concepts-library/hubris-oshri
|
|
||||||
sd-concepts-library/huckleberry
|
|
||||||
sd-concepts-library/hydrasuit
|
|
||||||
sd-concepts-library/i-love-chaos
|
|
||||||
sd-concepts-library/ibere-thenorio
|
|
||||||
sd-concepts-library/ic0n
|
|
||||||
sd-concepts-library/ie-gravestone
|
|
||||||
sd-concepts-library/ikea-fabler
|
|
||||||
sd-concepts-library/illustration-style
|
|
||||||
sd-concepts-library/ilo-kunst
|
|
||||||
sd-concepts-library/ilya-shkipin
|
|
||||||
sd-concepts-library/im-poppy
|
|
||||||
sd-concepts-library/ina-art
|
|
||||||
sd-concepts-library/indian-watercolor-portraits
|
|
||||||
sd-concepts-library/indiana
|
|
||||||
sd-concepts-library/ingmar-bergman
|
|
||||||
sd-concepts-library/insidewhale
|
|
||||||
sd-concepts-library/interchanges
|
|
||||||
sd-concepts-library/inuyama-muneto-style-nao
|
|
||||||
sd-concepts-library/irasutoya
|
|
||||||
sd-concepts-library/iridescent-illustration-style
|
|
||||||
sd-concepts-library/iridescent-photo-style
|
|
||||||
sd-concepts-library/isabell-schulte-pv-pvii-3000steps
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-1-image-style
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-1024px-1500-steps-style
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-12tiles-3000steps-style
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-4-tiles-1-lr-3000-steps-style
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-4-tiles-3-lr-5000-steps-style
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-4tiles-500steps
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-4tiles-6000steps
|
|
||||||
sd-concepts-library/isabell-schulte-pviii-style
|
|
||||||
sd-concepts-library/isometric-tile-test
|
|
||||||
sd-concepts-library/jacqueline-the-unicorn
|
|
||||||
sd-concepts-library/james-web-space-telescope
|
|
||||||
sd-concepts-library/jamie-hewlett-style
|
|
||||||
sd-concepts-library/jamiels
|
|
||||||
sd-concepts-library/jang-sung-rak-style
|
|
||||||
sd-concepts-library/jetsetdreamcastcovers
|
|
||||||
sd-concepts-library/jin-kisaragi
|
|
||||||
sd-concepts-library/jinjoon-lee-they
|
|
||||||
sd-concepts-library/jm-bergling-monogram
|
|
||||||
sd-concepts-library/joe-mad
|
|
||||||
sd-concepts-library/joe-whiteford-art-style
|
|
||||||
sd-concepts-library/joemad
|
|
||||||
sd-concepts-library/john-blanche
|
|
||||||
sd-concepts-library/johnny-silverhand
|
|
||||||
sd-concepts-library/jojo-bizzare-adventure-manga-lineart
|
|
||||||
sd-concepts-library/jos-de-kat
|
|
||||||
sd-concepts-library/junji-ito-artstyle
|
|
||||||
sd-concepts-library/kaleido
|
|
||||||
sd-concepts-library/kaneoya-sachiko
|
|
||||||
sd-concepts-library/kanovt
|
|
||||||
sd-concepts-library/kanv1
|
|
||||||
sd-concepts-library/karan-gloomy
|
|
||||||
sd-concepts-library/karl-s-lzx-1
|
|
||||||
sd-concepts-library/kasumin
|
|
||||||
sd-concepts-library/kawaii-colors
|
|
||||||
sd-concepts-library/kawaii-girl-plus-object
|
|
||||||
sd-concepts-library/kawaii-girl-plus-style
|
|
||||||
sd-concepts-library/kawaii-girl-plus-style-v1-1
|
|
||||||
sd-concepts-library/kay
|
|
||||||
sd-concepts-library/kaya-ghost-assasin
|
|
||||||
sd-concepts-library/ki
|
|
||||||
sd-concepts-library/kinda-sus
|
|
||||||
sd-concepts-library/kings-quest-agd
|
|
||||||
sd-concepts-library/kiora
|
|
||||||
sd-concepts-library/kira-sensei
|
|
||||||
sd-concepts-library/kirby
|
|
||||||
sd-concepts-library/klance
|
|
||||||
sd-concepts-library/kodakvision500t
|
|
||||||
sd-concepts-library/kogatan-shiny
|
|
||||||
sd-concepts-library/kogecha
|
|
||||||
sd-concepts-library/kojima-ayami
|
|
||||||
sd-concepts-library/koko-dog
|
|
||||||
sd-concepts-library/kuvshinov
|
|
||||||
sd-concepts-library/kysa-v-style
|
|
||||||
sd-concepts-library/laala-character
|
|
||||||
sd-concepts-library/larrette
|
|
||||||
sd-concepts-library/lavko
|
|
||||||
sd-concepts-library/lazytown-stephanie
|
|
||||||
sd-concepts-library/ldr
|
|
||||||
sd-concepts-library/ldrs
|
|
||||||
sd-concepts-library/led-toy
|
|
||||||
sd-concepts-library/lego-astronaut
|
|
||||||
sd-concepts-library/leica
|
|
||||||
sd-concepts-library/leif-jones
|
|
||||||
sd-concepts-library/lex
|
|
||||||
sd-concepts-library/liliana
|
|
||||||
sd-concepts-library/liliana-vess
|
|
||||||
sd-concepts-library/liminal-spaces-2-0
|
|
||||||
sd-concepts-library/liminalspaces
|
|
||||||
sd-concepts-library/line-art
|
|
||||||
sd-concepts-library/line-style
|
|
||||||
sd-concepts-library/linnopoke
|
|
||||||
sd-concepts-library/liquid-light
|
|
||||||
sd-concepts-library/liqwid-aquafarmer
|
|
||||||
sd-concepts-library/lizardman
|
|
||||||
sd-concepts-library/loab-character
|
|
||||||
sd-concepts-library/loab-style
|
|
||||||
sd-concepts-library/lofa
|
|
||||||
sd-concepts-library/logo-with-face-on-shield
|
|
||||||
sd-concepts-library/lolo
|
|
||||||
sd-concepts-library/looney-anime
|
|
||||||
sd-concepts-library/lost-rapper
|
|
||||||
sd-concepts-library/lphr-style
|
|
||||||
sd-concepts-library/lucario
|
|
||||||
sd-concepts-library/lucky-luke
|
|
||||||
sd-concepts-library/lugal-ki-en
|
|
||||||
sd-concepts-library/luinv2
|
|
||||||
sd-concepts-library/lula-13
|
|
||||||
sd-concepts-library/lumio
|
|
||||||
sd-concepts-library/lxj-o4
|
|
||||||
sd-concepts-library/m-geo
|
|
||||||
sd-concepts-library/m-geoo
|
|
||||||
sd-concepts-library/madhubani-art
|
|
||||||
sd-concepts-library/mafalda-character
|
|
||||||
sd-concepts-library/magic-pengel
|
|
||||||
sd-concepts-library/malika-favre-art-style
|
|
||||||
sd-concepts-library/manga-style
|
|
||||||
sd-concepts-library/marbling-art
|
|
||||||
sd-concepts-library/margo
|
|
||||||
sd-concepts-library/marty
|
|
||||||
sd-concepts-library/marty6
|
|
||||||
sd-concepts-library/mass
|
|
||||||
sd-concepts-library/masyanya
|
|
||||||
sd-concepts-library/masyunya
|
|
||||||
sd-concepts-library/mate
|
|
||||||
sd-concepts-library/matthew-stone
|
|
||||||
sd-concepts-library/mattvidpro
|
|
||||||
sd-concepts-library/maurice-quentin-de-la-tour-style
|
|
||||||
sd-concepts-library/maus
|
|
||||||
sd-concepts-library/max-foley
|
|
||||||
sd-concepts-library/mayor-richard-irvin
|
|
||||||
sd-concepts-library/mechasoulall
|
|
||||||
sd-concepts-library/medazzaland
|
|
||||||
sd-concepts-library/memnarch-mtg
|
|
||||||
sd-concepts-library/metagabe
|
|
||||||
sd-concepts-library/meyoco
|
|
||||||
sd-concepts-library/meze-audio-elite-headphones
|
|
||||||
sd-concepts-library/midjourney-style
|
|
||||||
sd-concepts-library/mikako-method
|
|
||||||
sd-concepts-library/mikako-methodi2i
|
|
||||||
sd-concepts-library/miko-3-robot
|
|
||||||
sd-concepts-library/milady
|
|
||||||
sd-concepts-library/mildemelwe-style
|
|
||||||
sd-concepts-library/million-live-akane-15k
|
|
||||||
sd-concepts-library/million-live-akane-3k
|
|
||||||
sd-concepts-library/million-live-akane-shifuku-3k
|
|
||||||
sd-concepts-library/million-live-spade-q-object-3k
|
|
||||||
sd-concepts-library/million-live-spade-q-style-3k
|
|
||||||
sd-concepts-library/minecraft-concept-art
|
|
||||||
sd-concepts-library/mishima-kurone
|
|
||||||
sd-concepts-library/mizkif
|
|
||||||
sd-concepts-library/moeb-style
|
|
||||||
sd-concepts-library/moebius
|
|
||||||
sd-concepts-library/mokoko
|
|
||||||
sd-concepts-library/mokoko-seed
|
|
||||||
sd-concepts-library/monster-girl
|
|
||||||
sd-concepts-library/monster-toy
|
|
||||||
sd-concepts-library/monte-novo
|
|
||||||
sd-concepts-library/moo-moo
|
|
||||||
sd-concepts-library/morino-hon-style
|
|
||||||
sd-concepts-library/moxxi
|
|
||||||
sd-concepts-library/msg
|
|
||||||
sd-concepts-library/mtg-card
|
|
||||||
sd-concepts-library/mtl-longsky
|
|
||||||
sd-concepts-library/mu-sadr
|
|
||||||
sd-concepts-library/munch-leaks-style
|
|
||||||
sd-concepts-library/museum-by-coop-himmelblau
|
|
||||||
sd-concepts-library/muxoyara
|
|
||||||
sd-concepts-library/my-hero-academia-style
|
|
||||||
sd-concepts-library/my-mug
|
|
||||||
sd-concepts-library/mycat
|
|
||||||
sd-concepts-library/mystical-nature
|
|
||||||
sd-concepts-library/naf
|
|
||||||
sd-concepts-library/nahiri
|
|
||||||
sd-concepts-library/namine-ritsu
|
|
||||||
sd-concepts-library/naoki-saito
|
|
||||||
sd-concepts-library/nard-style
|
|
||||||
sd-concepts-library/naruto
|
|
||||||
sd-concepts-library/natasha-johnston
|
|
||||||
sd-concepts-library/nathan-wyatt
|
|
||||||
sd-concepts-library/naval-portrait
|
|
||||||
sd-concepts-library/nazuna
|
|
||||||
sd-concepts-library/nebula
|
|
||||||
sd-concepts-library/ned-flanders
|
|
||||||
sd-concepts-library/neon-pastel
|
|
||||||
sd-concepts-library/new-priests
|
|
||||||
sd-concepts-library/nic-papercuts
|
|
||||||
sd-concepts-library/nikodim
|
|
||||||
sd-concepts-library/nissa-revane
|
|
||||||
sd-concepts-library/nixeu
|
|
||||||
sd-concepts-library/noggles
|
|
||||||
sd-concepts-library/nomad
|
|
||||||
sd-concepts-library/nouns-glasses
|
|
||||||
sd-concepts-library/obama-based-on-xi
|
|
||||||
sd-concepts-library/obama-self-2
|
|
||||||
sd-concepts-library/og-mox-style
|
|
||||||
sd-concepts-library/ohisashiburi-style
|
|
||||||
sd-concepts-library/oleg-kuvaev
|
|
||||||
sd-concepts-library/olli-olli
|
|
||||||
sd-concepts-library/on-kawara
|
|
||||||
sd-concepts-library/one-line-drawing
|
|
||||||
sd-concepts-library/onepunchman
|
|
||||||
sd-concepts-library/onzpo
|
|
||||||
sd-concepts-library/orangejacket
|
|
||||||
sd-concepts-library/ori
|
|
||||||
sd-concepts-library/ori-toor
|
|
||||||
sd-concepts-library/orientalist-art
|
|
||||||
sd-concepts-library/osaka-jyo
|
|
||||||
sd-concepts-library/osaka-jyo2
|
|
||||||
sd-concepts-library/osrsmini2
|
|
||||||
sd-concepts-library/osrstiny
|
|
||||||
sd-concepts-library/other-mother
|
|
||||||
sd-concepts-library/ouroboros
|
|
||||||
sd-concepts-library/outfit-items
|
|
||||||
sd-concepts-library/overprettified
|
|
||||||
sd-concepts-library/owl-house
|
|
||||||
sd-concepts-library/painted-by-silver-of-999
|
|
||||||
sd-concepts-library/painted-by-silver-of-999-2
|
|
||||||
sd-concepts-library/painted-student
|
|
||||||
sd-concepts-library/painting
|
|
||||||
sd-concepts-library/pantone-milk
|
|
||||||
sd-concepts-library/paolo-bonolis
|
|
||||||
sd-concepts-library/party-girl
|
|
||||||
sd-concepts-library/pascalsibertin
|
|
||||||
sd-concepts-library/pastelartstyle
|
|
||||||
sd-concepts-library/paul-noir
|
|
||||||
sd-concepts-library/pen-ink-portraits-bennorthen
|
|
||||||
sd-concepts-library/phan
|
|
||||||
sd-concepts-library/phan-s-collage
|
|
||||||
sd-concepts-library/phc
|
|
||||||
sd-concepts-library/phoenix-01
|
|
||||||
sd-concepts-library/pineda-david
|
|
||||||
sd-concepts-library/pink-beast-pastelae-style
|
|
||||||
sd-concepts-library/pintu
|
|
||||||
sd-concepts-library/pion-by-august-semionov
|
|
||||||
sd-concepts-library/piotr-jablonski
|
|
||||||
sd-concepts-library/pixel-mania
|
|
||||||
sd-concepts-library/pixel-toy
|
|
||||||
sd-concepts-library/pjablonski-style
|
|
||||||
sd-concepts-library/plant-style
|
|
||||||
sd-concepts-library/plen-ki-mun
|
|
||||||
sd-concepts-library/pokemon-conquest-sprites
|
|
||||||
sd-concepts-library/pool-test
|
|
||||||
sd-concepts-library/poolrooms
|
|
||||||
sd-concepts-library/poring-ragnarok-online
|
|
||||||
sd-concepts-library/poutine-dish
|
|
||||||
sd-concepts-library/princess-knight-art
|
|
||||||
sd-concepts-library/progress-chip
|
|
||||||
sd-concepts-library/puerquis-toy
|
|
||||||
sd-concepts-library/purplefishli
|
|
||||||
sd-concepts-library/pyramidheadcosplay
|
|
||||||
sd-concepts-library/qpt-atrium
|
|
||||||
sd-concepts-library/quiesel
|
|
||||||
sd-concepts-library/r-crumb-style
|
|
||||||
sd-concepts-library/rahkshi-bionicle
|
|
||||||
sd-concepts-library/raichu
|
|
||||||
sd-concepts-library/rail-scene
|
|
||||||
sd-concepts-library/rail-scene-style
|
|
||||||
sd-concepts-library/ralph-mcquarrie
|
|
||||||
sd-concepts-library/ransom
|
|
||||||
sd-concepts-library/rayne-weynolds
|
|
||||||
sd-concepts-library/rcrumb-portraits-style
|
|
||||||
sd-concepts-library/rd-chaos
|
|
||||||
sd-concepts-library/rd-paintings
|
|
||||||
sd-concepts-library/red-glasses
|
|
||||||
sd-concepts-library/reeducation-camp
|
|
||||||
sd-concepts-library/reksio-dog
|
|
||||||
sd-concepts-library/rektguy
|
|
||||||
sd-concepts-library/remert
|
|
||||||
sd-concepts-library/renalla
|
|
||||||
sd-concepts-library/repeat
|
|
||||||
sd-concepts-library/retro-girl
|
|
||||||
sd-concepts-library/retro-mecha-rangers
|
|
||||||
sd-concepts-library/retropixelart-pinguin
|
|
||||||
sd-concepts-library/rex-deno
|
|
||||||
sd-concepts-library/rhizomuse-machine-bionic-sculpture
|
|
||||||
sd-concepts-library/ricar
|
|
||||||
sd-concepts-library/rickyart
|
|
||||||
sd-concepts-library/rico-face
|
|
||||||
sd-concepts-library/riker-doll
|
|
||||||
sd-concepts-library/rikiart
|
|
||||||
sd-concepts-library/rikiboy-art
|
|
||||||
sd-concepts-library/rilakkuma
|
|
||||||
sd-concepts-library/rishusei-style
|
|
||||||
sd-concepts-library/rj-palmer
|
|
||||||
sd-concepts-library/rl-pkmn-test
|
|
||||||
sd-concepts-library/road-to-ruin
|
|
||||||
sd-concepts-library/robertnava
|
|
||||||
sd-concepts-library/roblox-avatar
|
|
||||||
sd-concepts-library/roy-lichtenstein
|
|
||||||
sd-concepts-library/ruan-jia
|
|
||||||
sd-concepts-library/russian
|
|
||||||
sd-concepts-library/s1m-naoto-ohshima
|
|
||||||
sd-concepts-library/saheeli-rai
|
|
||||||
sd-concepts-library/sakimi-style
|
|
||||||
sd-concepts-library/salmonid
|
|
||||||
sd-concepts-library/sam-yang
|
|
||||||
sd-concepts-library/sanguo-guanyu
|
|
||||||
sd-concepts-library/sas-style
|
|
||||||
sd-concepts-library/scarlet-witch
|
|
||||||
sd-concepts-library/schloss-mosigkau
|
|
||||||
sd-concepts-library/scrap-style
|
|
||||||
sd-concepts-library/scratch-project
|
|
||||||
sd-concepts-library/sculptural-style
|
|
||||||
sd-concepts-library/sd-concepts-library-uma-meme
|
|
||||||
sd-concepts-library/seamless-ground
|
|
||||||
sd-concepts-library/selezneva-alisa
|
|
||||||
sd-concepts-library/sem-mac2n
|
|
||||||
sd-concepts-library/senneca
|
|
||||||
sd-concepts-library/seraphimmoonshadow-art
|
|
||||||
sd-concepts-library/sewerslvt
|
|
||||||
sd-concepts-library/she-hulk-law-art
|
|
||||||
sd-concepts-library/she-mask
|
|
||||||
sd-concepts-library/sherhook-painting
|
|
||||||
sd-concepts-library/sherhook-painting-v2
|
|
||||||
sd-concepts-library/shev-linocut
|
|
||||||
sd-concepts-library/shigure-ui-style
|
|
||||||
sd-concepts-library/shiny-polyman
|
|
||||||
sd-concepts-library/shrunken-head
|
|
||||||
sd-concepts-library/shu-doll
|
|
||||||
sd-concepts-library/shvoren-style
|
|
||||||
sd-concepts-library/sims-2-portrait
|
|
||||||
sd-concepts-library/singsing
|
|
||||||
sd-concepts-library/singsing-doll
|
|
||||||
sd-concepts-library/sintez-ico
|
|
||||||
sd-concepts-library/skyfalls
|
|
||||||
sd-concepts-library/slm
|
|
||||||
sd-concepts-library/smarties
|
|
||||||
sd-concepts-library/smiling-friend-style
|
|
||||||
sd-concepts-library/smooth-pencils
|
|
||||||
sd-concepts-library/smurf-style
|
|
||||||
sd-concepts-library/smw-map
|
|
||||||
sd-concepts-library/society-finch
|
|
||||||
sd-concepts-library/sorami-style
|
|
||||||
sd-concepts-library/spider-gwen
|
|
||||||
sd-concepts-library/spritual-monsters
|
|
||||||
sd-concepts-library/stable-diffusion-conceptualizer
|
|
||||||
sd-concepts-library/star-tours-posters
|
|
||||||
sd-concepts-library/stardew-valley-pixel-art
|
|
||||||
sd-concepts-library/starhavenmachinegods
|
|
||||||
sd-concepts-library/sterling-archer
|
|
||||||
sd-concepts-library/stretch-re1-robot
|
|
||||||
sd-concepts-library/stuffed-penguin-toy
|
|
||||||
sd-concepts-library/style-of-marc-allante
|
|
||||||
sd-concepts-library/summie-style
|
|
||||||
sd-concepts-library/sunfish
|
|
||||||
sd-concepts-library/super-nintendo-cartridge
|
|
||||||
sd-concepts-library/supitcha-mask
|
|
||||||
sd-concepts-library/sushi-pixel
|
|
||||||
sd-concepts-library/swamp-choe-2
|
|
||||||
sd-concepts-library/t-skrang
|
|
||||||
sd-concepts-library/takuji-kawano
|
|
||||||
sd-concepts-library/tamiyo
|
|
||||||
sd-concepts-library/tangles
|
|
||||||
sd-concepts-library/tb303
|
|
||||||
sd-concepts-library/tcirle
|
|
||||||
sd-concepts-library/teelip-ir-landscape
|
|
||||||
sd-concepts-library/teferi
|
|
||||||
sd-concepts-library/tela-lenca
|
|
||||||
sd-concepts-library/tela-lenca2
|
|
||||||
sd-concepts-library/terraria-style
|
|
||||||
sd-concepts-library/tesla-bot
|
|
||||||
sd-concepts-library/test
|
|
||||||
sd-concepts-library/test-epson
|
|
||||||
sd-concepts-library/test2
|
|
||||||
sd-concepts-library/testing
|
|
||||||
sd-concepts-library/thalasin
|
|
||||||
sd-concepts-library/thegeneral
|
|
||||||
sd-concepts-library/thorneworks
|
|
||||||
sd-concepts-library/threestooges
|
|
||||||
sd-concepts-library/thunderdome-cover
|
|
||||||
sd-concepts-library/thunderdome-covers
|
|
||||||
sd-concepts-library/ti-junglepunk-v0
|
|
||||||
sd-concepts-library/tili-concept
|
|
||||||
sd-concepts-library/titan-robot
|
|
||||||
sd-concepts-library/tnj
|
|
||||||
sd-concepts-library/toho-pixel
|
|
||||||
sd-concepts-library/tomcat
|
|
||||||
sd-concepts-library/tonal1
|
|
||||||
sd-concepts-library/tony-diterlizzi-s-planescape-art
|
|
||||||
sd-concepts-library/towerplace
|
|
||||||
sd-concepts-library/toy
|
|
||||||
sd-concepts-library/toy-bonnie-plush
|
|
||||||
sd-concepts-library/toyota-sera
|
|
||||||
sd-concepts-library/transmutation-circles
|
|
||||||
sd-concepts-library/trash-polka-artstyle
|
|
||||||
sd-concepts-library/travis-bedel
|
|
||||||
sd-concepts-library/trigger-studio
|
|
||||||
sd-concepts-library/trust-support
|
|
||||||
sd-concepts-library/trypophobia
|
|
||||||
sd-concepts-library/ttte
|
|
||||||
sd-concepts-library/tubby
|
|
||||||
sd-concepts-library/tubby-cats
|
|
||||||
sd-concepts-library/tudisco
|
|
||||||
sd-concepts-library/turtlepics
|
|
||||||
sd-concepts-library/type
|
|
||||||
sd-concepts-library/ugly-sonic
|
|
||||||
sd-concepts-library/uliana-kudinova
|
|
||||||
sd-concepts-library/uma
|
|
||||||
sd-concepts-library/uma-clean-object
|
|
||||||
sd-concepts-library/uma-meme
|
|
||||||
sd-concepts-library/uma-meme-style
|
|
||||||
sd-concepts-library/uma-style-classic
|
|
||||||
sd-concepts-library/unfinished-building
|
|
||||||
sd-concepts-library/urivoldemort
|
|
||||||
sd-concepts-library/uzumaki
|
|
||||||
sd-concepts-library/valorantstyle
|
|
||||||
sd-concepts-library/vb-mox
|
|
||||||
sd-concepts-library/vcr-classique
|
|
||||||
sd-concepts-library/venice
|
|
||||||
sd-concepts-library/vespertine
|
|
||||||
sd-concepts-library/victor-narm
|
|
||||||
sd-concepts-library/vietstoneking
|
|
||||||
sd-concepts-library/vivien-reid
|
|
||||||
sd-concepts-library/vkuoo1
|
|
||||||
sd-concepts-library/vraska
|
|
||||||
sd-concepts-library/w3u
|
|
||||||
sd-concepts-library/walter-wick-photography
|
|
||||||
sd-concepts-library/warhammer-40k-drawing-style
|
|
||||||
sd-concepts-library/waterfallshadow
|
|
||||||
sd-concepts-library/wayne-reynolds-character
|
|
||||||
sd-concepts-library/wedding
|
|
||||||
sd-concepts-library/wedding-HandPainted
|
|
||||||
sd-concepts-library/werebloops
|
|
||||||
sd-concepts-library/wheatland
|
|
||||||
sd-concepts-library/wheatland-arknight
|
|
||||||
sd-concepts-library/wheelchair
|
|
||||||
sd-concepts-library/wildkat
|
|
||||||
sd-concepts-library/willy-hd
|
|
||||||
sd-concepts-library/wire-angels
|
|
||||||
sd-concepts-library/wish-artist-stile
|
|
||||||
sd-concepts-library/wlop-style
|
|
||||||
sd-concepts-library/wojak
|
|
||||||
sd-concepts-library/wojaks-now
|
|
||||||
sd-concepts-library/wojaks-now-now-now
|
|
||||||
sd-concepts-library/xatu
|
|
||||||
sd-concepts-library/xatu2
|
|
||||||
sd-concepts-library/xbh
|
|
||||||
sd-concepts-library/xi
|
|
||||||
sd-concepts-library/xidiversity
|
|
||||||
sd-concepts-library/xioboma
|
|
||||||
sd-concepts-library/xuna
|
|
||||||
sd-concepts-library/xyz
|
|
||||||
sd-concepts-library/yb-anime
|
|
||||||
sd-concepts-library/yerba-mate
|
|
||||||
sd-concepts-library/yesdelete
|
|
||||||
sd-concepts-library/yf21
|
|
||||||
sd-concepts-library/yilanov2
|
|
||||||
sd-concepts-library/yinit
|
|
||||||
sd-concepts-library/yoji-shinkawa-style
|
|
||||||
sd-concepts-library/yolandi-visser
|
|
||||||
sd-concepts-library/yoshi
|
|
||||||
sd-concepts-library/youpi2
|
|
||||||
sd-concepts-library/youtooz-candy
|
|
||||||
sd-concepts-library/yuji-himukai-style
|
|
||||||
sd-concepts-library/zaney
|
|
||||||
sd-concepts-library/zaneypixelz
|
|
||||||
sd-concepts-library/zdenek-art
|
|
||||||
sd-concepts-library/zero
|
|
||||||
sd-concepts-library/zero-bottle
|
|
||||||
sd-concepts-library/zero-suit-samus
|
|
||||||
sd-concepts-library/zillertal-can
|
|
||||||
sd-concepts-library/zizigooloo
|
|
||||||
sd-concepts-library/zk
|
|
||||||
sd-concepts-library/zoroark
|
|
||||||
@@ -107,4 +107,4 @@ lightning:
|
|||||||
benchmark: True
|
benchmark: True
|
||||||
max_steps: 4000000
|
max_steps: 4000000
|
||||||
# max_steps: 4000
|
# max_steps: 4000
|
||||||
|
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ model:
|
|||||||
target: ldm.modules.embedding_manager.EmbeddingManager
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
params:
|
params:
|
||||||
placeholder_strings: ["*"]
|
placeholder_strings: ["*"]
|
||||||
initializer_words: ['sculpture']
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
per_image_tokens: false
|
per_image_tokens: false
|
||||||
num_vectors_per_token: 1
|
num_vectors_per_token: 1
|
||||||
progressive_words: False
|
progressive_words: False
|
||||||
|
|||||||
@@ -30,9 +30,9 @@ model:
|
|||||||
target: ldm.modules.embedding_manager.EmbeddingManager
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
params:
|
params:
|
||||||
placeholder_strings: ["*"]
|
placeholder_strings: ["*"]
|
||||||
initializer_words: ['sculpture']
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
per_image_tokens: false
|
per_image_tokens: false
|
||||||
num_vectors_per_token: 8
|
num_vectors_per_token: 1
|
||||||
progressive_words: False
|
progressive_words: False
|
||||||
|
|
||||||
unet_config:
|
unet_config:
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ model:
|
|||||||
target: ldm.modules.embedding_manager.EmbeddingManager
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
params:
|
params:
|
||||||
placeholder_strings: ["*"]
|
placeholder_strings: ["*"]
|
||||||
initializer_words: ['sculpture']
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
per_image_tokens: false
|
per_image_tokens: false
|
||||||
num_vectors_per_token: 6
|
num_vectors_per_token: 6
|
||||||
progressive_words: False
|
progressive_words: False
|
||||||
@@ -107,4 +107,4 @@ lightning:
|
|||||||
benchmark: False
|
benchmark: False
|
||||||
max_steps: 6200
|
max_steps: 6200
|
||||||
# max_steps: 4000
|
# max_steps: 4000
|
||||||
|
|
||||||
|
|||||||
@@ -1,65 +1,84 @@
|
|||||||
FROM python:3.10-slim AS builder
|
FROM ubuntu AS get_miniconda
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
|
||||||
|
# install wget
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
wget \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# download and install miniconda
|
||||||
|
ARG conda_version=py39_4.12.0-Linux-x86_64
|
||||||
|
ARG conda_prefix=/opt/conda
|
||||||
|
RUN wget --progress=dot:giga -O /miniconda.sh \
|
||||||
|
https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
|
||||||
|
&& bash /miniconda.sh -b -p ${conda_prefix} \
|
||||||
|
&& rm -f /miniconda.sh
|
||||||
|
|
||||||
|
FROM ubuntu AS invokeai
|
||||||
|
|
||||||
# use bash
|
# use bash
|
||||||
SHELL [ "/bin/bash", "-c" ]
|
SHELL [ "/bin/bash", "-c" ]
|
||||||
|
|
||||||
|
# clean bashrc
|
||||||
|
RUN echo "" > ~/.bashrc
|
||||||
|
|
||||||
# Install necesarry packages
|
# Install necesarry packages
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install -y \
|
&& apt-get install -y \
|
||||||
--no-install-recommends \
|
--no-install-recommends \
|
||||||
gcc=4:10.2.* \
|
gcc \
|
||||||
libgl1-mesa-glx=20.3.* \
|
git \
|
||||||
libglib2.0-0=2.66.* \
|
libgl1-mesa-glx \
|
||||||
python3-dev=3.9.* \
|
libglib2.0-0 \
|
||||||
|
pip \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# set WORKDIR, PATH and copy sources
|
# clone repository, create models.yaml and create symlinks
|
||||||
ARG APPDIR=/usr/src/app
|
ARG invokeai_git=invoke-ai/InvokeAI
|
||||||
WORKDIR ${APPDIR}
|
ARG invokeai_branch=main
|
||||||
ENV PATH ${APPDIR}/.venv/bin:$PATH
|
ARG project_name=invokeai
|
||||||
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
|
ARG conda_env_file=environment-lin-cuda.yml
|
||||||
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
|
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
|
||||||
|
&& cp \
|
||||||
# install requirements
|
"/${project_name}/configs/models.yaml.example" \
|
||||||
RUN python3 -m venv .venv \
|
"/${project_name}/configs/models.yaml" \
|
||||||
&& pip install \
|
|
||||||
--upgrade \
|
|
||||||
--no-cache-dir \
|
|
||||||
'wheel>=0.38.4' \
|
|
||||||
&& pip install \
|
|
||||||
--no-cache-dir \
|
|
||||||
-r ${PIP_REQUIREMENTS}
|
|
||||||
|
|
||||||
FROM python:3.10-slim AS runtime
|
|
||||||
|
|
||||||
# setup environment
|
|
||||||
ARG APPDIR=/usr/src/app
|
|
||||||
WORKDIR ${APPDIR}
|
|
||||||
COPY --from=builder ${APPDIR} .
|
|
||||||
ENV \
|
|
||||||
PATH=${APPDIR}/.venv/bin:$PATH \
|
|
||||||
INVOKEAI_ROOT=/data \
|
|
||||||
INVOKE_MODEL_RECONFIGURE=--yes
|
|
||||||
|
|
||||||
# Install necesarry packages
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
build-essential=12.9 \
|
|
||||||
libgl1-mesa-glx=20.3.* \
|
|
||||||
libglib2.0-0=2.66.* \
|
|
||||||
libopencv-dev=4.5.* \
|
|
||||||
&& ln -sf \
|
&& ln -sf \
|
||||||
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
|
"/${project_name}/environments-and-requirements/${conda_env_file}" \
|
||||||
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
|
"/${project_name}/environment.yml" \
|
||||||
&& python3 -c "from patchmatch import patch_match" \
|
&& ln -sf \
|
||||||
&& apt-get remove -y \
|
/data/models/v1-5-pruned-emaonly.ckpt \
|
||||||
--autoremove \
|
"/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
|
||||||
build-essential \
|
&& ln -sf \
|
||||||
&& apt-get autoclean \
|
/data/outputs/ \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
"/${project_name}/outputs"
|
||||||
|
|
||||||
# set Entrypoint and default CMD
|
# set workdir
|
||||||
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
|
WORKDIR "/${project_name}"
|
||||||
CMD [ "--web", "--host=0.0.0.0" ]
|
|
||||||
|
# install conda env and preload models
|
||||||
|
ARG conda_prefix=/opt/conda
|
||||||
|
COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
|
||||||
|
RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
|
||||||
|
&& conda init bash \
|
||||||
|
&& source ~/.bashrc \
|
||||||
|
&& conda env create \
|
||||||
|
--name "${project_name}" \
|
||||||
|
&& rm -Rf ~/.cache \
|
||||||
|
&& conda clean -afy \
|
||||||
|
&& echo "conda activate ${project_name}" >> ~/.bashrc
|
||||||
|
|
||||||
|
RUN source ~/.bashrc \
|
||||||
|
&& python scripts/preload_models.py \
|
||||||
|
--no-interactive
|
||||||
|
|
||||||
|
# Copy entrypoint and set env
|
||||||
|
ENV CONDA_PREFIX="${conda_prefix}"
|
||||||
|
ENV PROJECT_NAME="${project_name}"
|
||||||
|
COPY docker-build/entrypoint.sh /
|
||||||
|
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||||
|
|||||||
@@ -1,86 +0,0 @@
|
|||||||
#######################
|
|
||||||
#### Builder stage ####
|
|
||||||
|
|
||||||
FROM library/ubuntu:22.04 AS builder
|
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt update && apt-get install -y \
|
|
||||||
git \
|
|
||||||
libglib2.0-0 \
|
|
||||||
libgl1-mesa-glx \
|
|
||||||
python3-venv \
|
|
||||||
python3-pip \
|
|
||||||
build-essential \
|
|
||||||
python3-opencv \
|
|
||||||
libopencv-dev
|
|
||||||
|
|
||||||
# This is needed for patchmatch support
|
|
||||||
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
|
|
||||||
ln -sf opencv4.pc opencv.pc
|
|
||||||
|
|
||||||
ARG WORKDIR=/invokeai
|
|
||||||
WORKDIR ${WORKDIR}
|
|
||||||
|
|
||||||
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
|
||||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
|
||||||
python3 -m venv ${VIRTUAL_ENV} &&\
|
|
||||||
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
|
|
||||||
torch==1.12.0+cu116 \
|
|
||||||
torchvision==0.13.0+cu116 &&\
|
|
||||||
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
|
|
||||||
|
|
||||||
COPY . .
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
|
||||||
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
|
|
||||||
pip install -r requirements.txt &&\
|
|
||||||
pip install -e .
|
|
||||||
|
|
||||||
|
|
||||||
#######################
|
|
||||||
#### Runtime stage ####
|
|
||||||
|
|
||||||
FROM library/ubuntu:22.04 as runtime
|
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
|
||||||
ENV PYTHONUNBUFFERED=1
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt update && apt install -y --no-install-recommends \
|
|
||||||
git \
|
|
||||||
curl \
|
|
||||||
ncdu \
|
|
||||||
iotop \
|
|
||||||
bzip2 \
|
|
||||||
libglib2.0-0 \
|
|
||||||
libgl1-mesa-glx \
|
|
||||||
python3-venv \
|
|
||||||
python3-pip \
|
|
||||||
build-essential \
|
|
||||||
python3-opencv \
|
|
||||||
libopencv-dev &&\
|
|
||||||
apt-get clean && apt-get autoclean
|
|
||||||
|
|
||||||
ARG WORKDIR=/invokeai
|
|
||||||
WORKDIR ${WORKDIR}
|
|
||||||
|
|
||||||
ENV INVOKEAI_ROOT=/mnt/invokeai
|
|
||||||
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
|
||||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
|
||||||
|
|
||||||
COPY --from=builder ${WORKDIR} ${WORKDIR}
|
|
||||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
|
|
||||||
|
|
||||||
# build patchmatch
|
|
||||||
RUN python -c "from patchmatch import patch_match"
|
|
||||||
|
|
||||||
## workaround for non-existent initfile when runtime directory is mounted; see #1613
|
|
||||||
RUN touch /root/.invokeai
|
|
||||||
|
|
||||||
ENTRYPOINT ["bash"]
|
|
||||||
|
|
||||||
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
|
|
||||||
INVOKEAI_ROOT=/mnt/invokeai
|
|
||||||
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
|
|
||||||
HOST_MOUNT_PATH=${HOME}/invokeai
|
|
||||||
|
|
||||||
IMAGE=local/invokeai:latest
|
|
||||||
|
|
||||||
USER=$(shell id -u)
|
|
||||||
GROUP=$(shell id -g)
|
|
||||||
|
|
||||||
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
|
|
||||||
# This is consistent with the expected non-Docker behaviour.
|
|
||||||
# Contents can be moved to a persistent storage and used to prime the cache on another host.
|
|
||||||
|
|
||||||
build:
|
|
||||||
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
|
|
||||||
|
|
||||||
configure:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
${IMAGE} -c "python scripts/configure_invokeai.py"
|
|
||||||
|
|
||||||
# Run the container with the runtime dir mounted and the web server exposed on port 9090
|
|
||||||
web:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
-p 9090:9090 \
|
|
||||||
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
|
|
||||||
|
|
||||||
# Run the cli with the runtime dir mounted
|
|
||||||
cli:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
${IMAGE} -c "python scripts/invoke.py"
|
|
||||||
|
|
||||||
# Run the container with the runtime dir mounted and open a bash shell
|
|
||||||
shell:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
|
|
||||||
|
|
||||||
.PHONY: build configure web cli shell
|
|
||||||
@@ -1,35 +1,84 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
set -e
|
set -e
|
||||||
|
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
|
||||||
|
# configure values by using env when executing build.sh
|
||||||
|
# f.e. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh
|
||||||
|
|
||||||
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
|
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||||
|
|
||||||
source ./docker-build/env.sh \
|
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
|
||||||
|| echo "please execute docker-build/build.sh from repository root" \
|
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
|
||||||
|| exit 1
|
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
|
||||||
|
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
|
||||||
PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
|
invokeai_branch=${INVOKEAI_BRANCH:-main}
|
||||||
DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
|
huggingface_token=${HUGGINGFACE_TOKEN?}
|
||||||
|
|
||||||
# print the settings
|
# print the settings
|
||||||
echo -e "You are using these values:\n"
|
echo "You are using these values:"
|
||||||
echo -e "Dockerfile:\t ${DOCKERFILE}"
|
echo -e "project_name:\t\t ${project_name}"
|
||||||
echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
|
echo -e "volumename:\t\t ${volumename}"
|
||||||
echo -e "Volumename:\t ${VOLUMENAME}"
|
echo -e "arch:\t\t\t ${arch}"
|
||||||
echo -e "arch:\t\t ${ARCH}"
|
echo -e "platform:\t\t ${platform}"
|
||||||
echo -e "Platform:\t ${PLATFORM}"
|
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
|
||||||
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
|
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
|
||||||
|
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
|
||||||
|
echo -e "invokeai_git:\t\t ${invokeai_git}"
|
||||||
|
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
|
||||||
|
|
||||||
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
|
_runAlpine() {
|
||||||
echo -e "Volume already exists\n"
|
docker run \
|
||||||
|
--rm \
|
||||||
|
--interactive \
|
||||||
|
--tty \
|
||||||
|
--mount source="$volumename",target=/data \
|
||||||
|
--workdir /data \
|
||||||
|
alpine "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
_copyCheckpoints() {
|
||||||
|
echo "creating subfolders for models and outputs"
|
||||||
|
_runAlpine mkdir models
|
||||||
|
_runAlpine mkdir outputs
|
||||||
|
echo "downloading v1-5-pruned-emaonly.ckpt"
|
||||||
|
_runAlpine wget \
|
||||||
|
--header="Authorization: Bearer ${huggingface_token}" \
|
||||||
|
-O models/v1-5-pruned-emaonly.ckpt \
|
||||||
|
https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
||||||
|
echo "done"
|
||||||
|
}
|
||||||
|
|
||||||
|
_checkVolumeContent() {
|
||||||
|
_runAlpine ls -lhA /data/models
|
||||||
|
}
|
||||||
|
|
||||||
|
_getModelMd5s() {
|
||||||
|
_runAlpine \
|
||||||
|
alpine sh -c "md5sum /data/models/*.ckpt"
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
|
||||||
|
echo "Volume already exists"
|
||||||
|
if [[ -z "$(_checkVolumeContent)" ]]; then
|
||||||
|
echo "looks empty, copying checkpoint"
|
||||||
|
_copyCheckpoints
|
||||||
|
fi
|
||||||
|
echo "Models in ${volumename}:"
|
||||||
|
_checkVolumeContent
|
||||||
else
|
else
|
||||||
echo -n "createing docker volume "
|
echo -n "createing docker volume "
|
||||||
docker volume create "${VOLUMENAME}"
|
docker volume create "${volumename}"
|
||||||
|
_copyCheckpoints
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Build Container
|
# Build Container
|
||||||
docker build \
|
docker build \
|
||||||
--platform="${PLATFORM}" \
|
--platform="${platform}" \
|
||||||
--tag="${INVOKEAI_TAG}" \
|
--tag "${invokeai_tag}" \
|
||||||
--build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
|
--build-arg project_name="${project_name}" \
|
||||||
--file="${DOCKERFILE}" \
|
--build-arg conda_version="${invokeai_conda_version}" \
|
||||||
|
--build-arg conda_prefix="${invokeai_conda_prefix}" \
|
||||||
|
--build-arg conda_env_file="${invokeai_conda_env_file}" \
|
||||||
|
--build-arg invokeai_git="${invokeai_git}" \
|
||||||
|
--build-arg invokeai_branch="${invokeai_branch}" \
|
||||||
|
--file ./docker-build/Dockerfile \
|
||||||
.
|
.
|
||||||
|
|||||||
8
docker-build/entrypoint.sh
Executable file
@@ -0,0 +1,8 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
|
||||||
|
conda activate "${PROJECT_NAME}"
|
||||||
|
|
||||||
|
python scripts/invoke.py \
|
||||||
|
${@:---web --host=0.0.0.0}
|
||||||
@@ -1,10 +1,13 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# Variables shared by build.sh and run.sh
|
project_name=${PROJECT_NAME:-invokeai}
|
||||||
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
|
volumename=${VOLUMENAME:-${project_name}_data}
|
||||||
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
|
arch=${ARCH:-x86_64}
|
||||||
ARCH=${ARCH:-$(uname -m)}
|
platform=${PLATFORM:-Linux/${arch}}
|
||||||
PLATFORM=${PLATFORM:-Linux/${ARCH}}
|
invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
|
||||||
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
|
|
||||||
INVOKEAI_BRANCH=$(git branch --show)
|
export project_name
|
||||||
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}
|
export volumename
|
||||||
|
export arch
|
||||||
|
export platform
|
||||||
|
export invokeai_tag
|
||||||
|
|||||||
@@ -1,31 +1,15 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
|
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||||
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
|
|
||||||
|
|
||||||
source ./docker-build/env.sh \
|
|
||||||
|| echo "please run from repository root" \
|
|
||||||
|| exit 1
|
|
||||||
|
|
||||||
# check if HUGGINGFACE_TOKEN is available
|
|
||||||
# You must have accepted the terms of use for required models
|
|
||||||
HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
|
|
||||||
|
|
||||||
echo -e "You are using these values:\n"
|
|
||||||
echo -e "Volumename:\t ${VOLUMENAME}"
|
|
||||||
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
|
|
||||||
|
|
||||||
docker run \
|
docker run \
|
||||||
--interactive \
|
--interactive \
|
||||||
--tty \
|
--tty \
|
||||||
--rm \
|
--rm \
|
||||||
--platform="$PLATFORM" \
|
--platform "$platform" \
|
||||||
--name="${REPOSITORY_NAME,,}" \
|
--name "$project_name" \
|
||||||
--hostname="${REPOSITORY_NAME,,}" \
|
--hostname "$project_name" \
|
||||||
--mount="source=$VOLUMENAME,target=/data" \
|
--mount source="$volumename",target=/data \
|
||||||
--env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
|
--publish 9090:9090 \
|
||||||
--publish=9090:9090 \
|
"$invokeai_tag" ${1:+$@}
|
||||||
--cap-add=sys_nice \
|
|
||||||
${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
|
|
||||||
"$INVOKEAI_TAG" ${1:+$@}
|
|
||||||
|
|||||||
@@ -4,275 +4,180 @@ title: Changelog
|
|||||||
|
|
||||||
# :octicons-log-16: **Changelog**
|
# :octicons-log-16: **Changelog**
|
||||||
|
|
||||||
## v2.2.4 <small>(11 December 2022)</small>
|
|
||||||
|
|
||||||
**the `invokeai` directory**
|
|
||||||
|
|
||||||
Previously there were two directories to worry about, the directory that
|
|
||||||
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
|
|
||||||
directory that contained the models files, embeddings, configuration and
|
|
||||||
outputs. With the 2.2.4 release, this dual system is done away with, and
|
|
||||||
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
|
|
||||||
live in a directory named `invokeai`. By default this directory is located in
|
|
||||||
your home directory (e.g. `\Users\yourname` on Windows), but you can select
|
|
||||||
where it goes at install time.
|
|
||||||
|
|
||||||
After installation, you can delete the install directory (the one that the zip
|
|
||||||
file creates when it unpacks). Do **not** delete or move the `invokeai`
|
|
||||||
directory!
|
|
||||||
|
|
||||||
**Initialization file `invokeai/invokeai.init`**
|
|
||||||
|
|
||||||
You can place frequently-used startup options in this file, such as the default
|
|
||||||
number of steps or your preferred sampler. To keep everything in one place, this
|
|
||||||
file has now been moved into the `invokeai` directory and is named
|
|
||||||
`invokeai.init`.
|
|
||||||
|
|
||||||
**To update from Version 2.2.3**
|
|
||||||
|
|
||||||
The easiest route is to download and unpack one of the 2.2.4 installer files.
|
|
||||||
When it asks you for the location of the `invokeai` runtime directory, respond
|
|
||||||
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
|
|
||||||
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
|
|
||||||
and answer "Y" when asked if you want to reuse the directory.
|
|
||||||
|
|
||||||
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
|
|
||||||
does not know about the new directory layout and won't be fully functional.
|
|
||||||
|
|
||||||
**To update to 2.2.5 (and beyond) there's now an update path**
|
|
||||||
|
|
||||||
As they become available, you can update to more recent versions of InvokeAI
|
|
||||||
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
|
|
||||||
Running it without any arguments will install the most recent version of
|
|
||||||
InvokeAI. Alternatively, you can get set releases by running the `update.sh`
|
|
||||||
script with an argument in the command shell. This syntax accepts the path to
|
|
||||||
the desired release's zip file, which you can find by clicking on the green
|
|
||||||
"Code" button on this repository's home page.
|
|
||||||
|
|
||||||
**Other 2.2.4 Improvements**
|
|
||||||
|
|
||||||
- Fix InvokeAI GUI initialization by @addianto in #1687
|
|
||||||
- fix link in documentation by @lstein in #1728
|
|
||||||
- Fix broken link by @ShawnZhong in #1736
|
|
||||||
- Remove reference to binary installer by @lstein in #1731
|
|
||||||
- documentation fixes for 2.2.3 by @lstein in #1740
|
|
||||||
- Modify installer links to point closer to the source installer by @ebr in
|
|
||||||
#1745
|
|
||||||
- add documentation warning about 1650/60 cards by @lstein in #1753
|
|
||||||
- Fix Linux source URL in installation docs by @andybearman in #1756
|
|
||||||
- Make install instructions discoverable in readme by @damian0815 in #1752
|
|
||||||
- typo fix by @ofirkris in #1755
|
|
||||||
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
|
|
||||||
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
|
|
||||||
in #1765
|
|
||||||
- stability and usage improvements to binary & source installers by @lstein in
|
|
||||||
#1760
|
|
||||||
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
|
|
||||||
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
|
|
||||||
- invoke script cds to its location before running by @lstein in #1805
|
|
||||||
- Make PaperCut and VoxelArt models load again by @lstein in #1730
|
|
||||||
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
|
|
||||||
#1817
|
|
||||||
- Clean up readme by @hipsterusername in #1820
|
|
||||||
- Optimized Docker build with support for external working directory by @ebr in
|
|
||||||
#1544
|
|
||||||
- disable pushing the cloud container by @mauwii in #1831
|
|
||||||
- Fix docker push github action and expand with additional metadata by @ebr in
|
|
||||||
#1837
|
|
||||||
- Fix Broken Link To Notebook by @VedantMadane in #1821
|
|
||||||
- Account for flat models by @spezialspezial in #1766
|
|
||||||
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
|
|
||||||
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
|
|
||||||
@SammCheese in #1848
|
|
||||||
- Make force free GPU memory work in img2img by @addianto in #1844
|
|
||||||
- New installer by @lstein
|
|
||||||
|
|
||||||
## v2.2.3 <small>(2 December 2022)</small>
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
|
|
||||||
This point release removes references to the binary installer from the
|
|
||||||
installation guide. The binary installer is not stable at the current
|
|
||||||
time. First time users are encouraged to use the "source" installer as
|
|
||||||
described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)
|
|
||||||
|
|
||||||
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
|
||||||
robust workflow solution for creating AI-generated and human facilitated
|
|
||||||
compositions. Additional enhancements have been made as well, improving safety,
|
|
||||||
ease of use, and installation.
|
|
||||||
|
|
||||||
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
|
||||||
512x768 image (and less for smaller images), and is compatible with
|
|
||||||
Windows/Linux/Mac (M1 & M2).
|
|
||||||
|
|
||||||
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
|
||||||
introduces the main WebUI enhancement for version 2.2 -
|
|
||||||
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
|
|
||||||
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
|
|
||||||
potential for users to create and iterate on their creations. The following
|
|
||||||
sections describe what's new for InvokeAI.
|
|
||||||
|
|
||||||
## v2.2.2 <small>(30 November 2022)</small>
|
|
||||||
|
|
||||||
!!! note
|
|
||||||
|
|
||||||
The binary installer is not ready for prime time. First time users are recommended to install via the "source" installer accessible through the links at the bottom of this page.****
|
|
||||||
|
|
||||||
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
|
||||||
robust workflow solution for creating AI-generated and human facilitated
|
|
||||||
compositions. Additional enhancements have been made as well, improving safety,
|
|
||||||
ease of use, and installation.
|
|
||||||
|
|
||||||
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
|
||||||
512x768 image (and less for smaller images), and is compatible with
|
|
||||||
Windows/Linux/Mac (M1 & M2).
|
|
||||||
|
|
||||||
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
|
||||||
introduces the main WebUI enhancement for version 2.2 -
|
|
||||||
[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
|
|
||||||
This new workflow is the biggest enhancement added to the WebUI to date, and
|
|
||||||
unlocks a stunning amount of potential for users to create and iterate on their
|
|
||||||
creations. The following sections describe what's new for InvokeAI.
|
|
||||||
|
|
||||||
## v2.2.0 <small>(2 December 2022)</small>
|
|
||||||
|
|
||||||
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
|
||||||
robust workflow solution for creating AI-generated and human facilitated
|
|
||||||
compositions. Additional enhancements have been made as well, improving safety,
|
|
||||||
ease of use, and installation.
|
|
||||||
|
|
||||||
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
|
||||||
512x768 image (and less for smaller images), and is compatible with
|
|
||||||
Windows/Linux/Mac (M1 & M2).
|
|
||||||
|
|
||||||
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
|
||||||
introduces the main WebUI enhancement for version 2.2 -
|
|
||||||
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
|
|
||||||
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
|
|
||||||
potential for users to create and iterate on their creations. The following
|
|
||||||
sections describe what's new for InvokeAI.
|
|
||||||
|
|
||||||
## v2.1.3 <small>(13 November 2022)</small>
|
|
||||||
|
|
||||||
- A choice of installer scripts that automate installation and configuration.
|
|
||||||
See
|
|
||||||
[Installation](installation/index.md).
|
|
||||||
- A streamlined manual installation process that works for both Conda and
|
|
||||||
PIP-only installs. See
|
|
||||||
[Manual Installation](installation/INSTALL_MANUAL.md).
|
|
||||||
- The ability to save frequently-used startup options (model to load, steps,
|
|
||||||
sampler, etc) in a `.invokeai` file. See
|
|
||||||
[Client](features/CLI.md)
|
|
||||||
- Support for AMD GPU cards (non-CUDA) on Linux machines.
|
|
||||||
- Multiple bugs and edge cases squashed.
|
|
||||||
|
|
||||||
## v2.1.0 <small>(2 November 2022)</small>
|
## v2.1.0 <small>(2 November 2022)</small>
|
||||||
|
|
||||||
- update mac instructions to use invokeai for env name by @willwillems in #1030
|
- update mac instructions to use invokeai for env name by @willwillems in
|
||||||
- Update .gitignore by @blessedcoolant in #1040
|
https://github.com/invoke-ai/InvokeAI/pull/1030
|
||||||
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
|
- Update .gitignore by @blessedcoolant in
|
||||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
|
https://github.com/invoke-ai/InvokeAI/pull/1040
|
||||||
- Print out the device type which is used by @manzke in #1073
|
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
||||||
- Hires Addition by @hipsterusername in #1063
|
missing after merge by @skurovec in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1056
|
||||||
|
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1060
|
||||||
|
- Print out the device type which is used by @manzke in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1073
|
||||||
|
- Hires Addition by @hipsterusername in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1063
|
||||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||||
@skurovec in #1081
|
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
||||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||||
warning by @db3000 in #1077
|
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
||||||
- fix noisy images at high step counts by @lstein in #1086
|
- fix noisy images at high step counts by @lstein in
|
||||||
- Generalize facetool strength argument by @db3000 in #1078
|
https://github.com/invoke-ai/InvokeAI/pull/1086
|
||||||
|
- Generalize facetool strength argument by @db3000 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1078
|
||||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||||
#1066
|
https://github.com/invoke-ai/InvokeAI/pull/1066
|
||||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
|
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
||||||
- Update generate.py by @unreleased in #1109
|
https://github.com/invoke-ai/InvokeAI/pull/1095
|
||||||
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
|
- Update generate.py by @unreleased in
|
||||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
|
https://github.com/invoke-ai/InvokeAI/pull/1109
|
||||||
- Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
|
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
|
||||||
- Only output facetool parameters if enhancing faces by @db3000 in #1119
|
https://github.com/invoke-ai/InvokeAI/pull/1125
|
||||||
|
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1123
|
||||||
|
- Fix broken doc links, fix malaprop in the project subtitle by @majick in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1131
|
||||||
|
- Only output facetool parameters if enhancing faces by @db3000 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1119
|
||||||
- Update gitignore to ignore codeformer weights at new location by
|
- Update gitignore to ignore codeformer weights at new location by
|
||||||
@spezialspezial in #1136
|
@spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
|
||||||
- fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
|
- fix links to point to invoke-ai.github.io #1117 by @mauwii in
|
||||||
- Rework-mkdocs by @mauwii in #1144
|
https://github.com/invoke-ai/InvokeAI/pull/1143
|
||||||
|
- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
|
||||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||||
by @lstein in #1127
|
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
||||||
- Fix img2img DDIM index out of bound by @wfng92 in #1137
|
- Fix img2img DDIM index out of bound by @wfng92 in
|
||||||
- Fix gh actions by @mauwii in #1128
|
https://github.com/invoke-ai/InvokeAI/pull/1137
|
||||||
- update mac instructions to use invokeai for env name by @willwillems in #1030
|
- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
|
||||||
- Update .gitignore by @blessedcoolant in #1040
|
- update mac instructions to use invokeai for env name by @willwillems in
|
||||||
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
|
https://github.com/invoke-ai/InvokeAI/pull/1030
|
||||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
|
- Update .gitignore by @blessedcoolant in
|
||||||
- Print out the device type which is used by @manzke in #1073
|
https://github.com/invoke-ai/InvokeAI/pull/1040
|
||||||
- Hires Addition by @hipsterusername in #1063
|
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
||||||
|
missing after merge by @skurovec in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1056
|
||||||
|
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1060
|
||||||
|
- Print out the device type which is used by @manzke in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1073
|
||||||
|
- Hires Addition by @hipsterusername in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1063
|
||||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||||
@skurovec in #1081
|
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
||||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||||
warning by @db3000 in #1077
|
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
||||||
- fix noisy images at high step counts by @lstein in #1086
|
- fix noisy images at high step counts by @lstein in
|
||||||
- Generalize facetool strength argument by @db3000 in #1078
|
https://github.com/invoke-ai/InvokeAI/pull/1086
|
||||||
|
- Generalize facetool strength argument by @db3000 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1078
|
||||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||||
#1066
|
https://github.com/invoke-ai/InvokeAI/pull/1066
|
||||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
|
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
||||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
|
https://github.com/invoke-ai/InvokeAI/pull/1095
|
||||||
- Only output facetool parameters if enhancing faces by @db3000 in #1119
|
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1123
|
||||||
|
- Only output facetool parameters if enhancing faces by @db3000 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1119
|
||||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||||
by @lstein in #1127
|
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
||||||
- Fix img2img DDIM index out of bound by @wfng92 in #1137
|
- Fix img2img DDIM index out of bound by @wfng92 in
|
||||||
- Add text prompt to inpaint mask support by @lstein in #1133
|
https://github.com/invoke-ai/InvokeAI/pull/1137
|
||||||
|
- Add text prompt to inpaint mask support by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1133
|
||||||
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
|
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
|
||||||
#976
|
https://github.com/invoke-ai/InvokeAI/pull/976
|
||||||
- WebUI: Adds Codeformer support by @psychedelicious in #1151
|
- WebUI: Adds Codeformer support by @psychedelicious in
|
||||||
- Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
|
https://github.com/invoke-ai/InvokeAI/pull/1151
|
||||||
- Add Asymmetric Tiling by @carson-katri in #1132
|
- Skips normalizing prompts for web UI metadata by @psychedelicious in
|
||||||
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
|
https://github.com/invoke-ai/InvokeAI/pull/1165
|
||||||
|
- Add Asymmetric Tiling by @carson-katri in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1132
|
||||||
|
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1172
|
||||||
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
|
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
|
||||||
in #1175
|
in https://github.com/invoke-ai/InvokeAI/pull/1175
|
||||||
- Flips channels using array slicing instead of using OpenCV by @psychedelicious
|
- Flips channels using array slicing instead of using OpenCV by @psychedelicious
|
||||||
in #1178
|
in https://github.com/invoke-ai/InvokeAI/pull/1178
|
||||||
- Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
|
- Fix typo in docs: s/Formally/Formerly by @noodlebox in
|
||||||
- fix clipseg loading problems by @lstein in #1177
|
https://github.com/invoke-ai/InvokeAI/pull/1176
|
||||||
- Correct color channels in upscale using array slicing by @wfng92 in #1181
|
- fix clipseg loading problems by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1177
|
||||||
|
- Correct color channels in upscale using array slicing by @wfng92 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1181
|
||||||
- Web UI: Filters existing images when adding new images; Fixes #1085 by
|
- Web UI: Filters existing images when adding new images; Fixes #1085 by
|
||||||
@psychedelicious in #1171
|
@psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
|
||||||
- fix a number of bugs in textual inversion by @lstein in #1190
|
- fix a number of bugs in textual inversion by @lstein in
|
||||||
- Improve !fetch, add !replay command by @ArDiouscuros in #882
|
https://github.com/invoke-ai/InvokeAI/pull/1190
|
||||||
- Fix generation of image with s>1000 by @holstvoogd in #951
|
- Improve !fetch, add !replay command by @ArDiouscuros in
|
||||||
- Web UI: Gallery improvements by @psychedelicious in #1198
|
https://github.com/invoke-ai/InvokeAI/pull/882
|
||||||
- Update CLI.md by @krummrey in #1211
|
- Fix generation of image with s>1000 by @holstvoogd in
|
||||||
- outcropping improvements by @lstein in #1207
|
https://github.com/invoke-ai/InvokeAI/pull/951
|
||||||
- add support for loading VAE autoencoders by @lstein in #1216
|
- Web UI: Gallery improvements by @psychedelicious in
|
||||||
- remove duplicate fix_func for MPS by @wfng92 in #1210
|
https://github.com/invoke-ai/InvokeAI/pull/1198
|
||||||
- Metadata storage and retrieval fixes by @lstein in #1204
|
- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
|
||||||
- nix: add shell.nix file by @Cloudef in #1170
|
- outcropping improvements by @lstein in
|
||||||
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
|
https://github.com/invoke-ai/InvokeAI/pull/1207
|
||||||
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
|
- add support for loading VAE autoencoders by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1216
|
||||||
|
- remove duplicate fix_func for MPS by @wfng92 in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1210
|
||||||
|
- Metadata storage and retrieval fixes by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1204
|
||||||
|
- nix: add shell.nix file by @Cloudef in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1170
|
||||||
|
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1185
|
||||||
|
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1187
|
||||||
- Allow user to generate images with initial noise as on M1 / mps system by
|
- Allow user to generate images with initial noise as on M1 / mps system by
|
||||||
@ArDiouscuros in #981
|
@ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
|
||||||
- feat: adding filename format template by @plucked in #968
|
- feat: adding filename format template by @plucked in
|
||||||
- Web UI: Fixes broken bundle by @psychedelicious in #1242
|
https://github.com/invoke-ai/InvokeAI/pull/968
|
||||||
- Support runwayML custom inpainting model by @lstein in #1243
|
- Web UI: Fixes broken bundle by @psychedelicious in
|
||||||
- Update IMG2IMG.md by @talitore in #1262
|
https://github.com/invoke-ai/InvokeAI/pull/1242
|
||||||
|
- Support runwayML custom inpainting model by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1243
|
||||||
|
- Update IMG2IMG.md by @talitore in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1262
|
||||||
- New dockerfile - including a build- and a run- script as well as a GH-Action
|
- New dockerfile - including a build- and a run- script as well as a GH-Action
|
||||||
by @mauwii in #1233
|
by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
|
||||||
- cut over from karras to model noise schedule for higher steps by @lstein in
|
- cut over from karras to model noise schedule for higher steps by @lstein in
|
||||||
#1222
|
https://github.com/invoke-ai/InvokeAI/pull/1222
|
||||||
- Prompt tweaks by @lstein in #1268
|
- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
|
||||||
- Outpainting implementation by @Kyle0654 in #1251
|
- Outpainting implementation by @Kyle0654 in
|
||||||
- fixing aspect ratio on hires by @tjennings in #1249
|
https://github.com/invoke-ai/InvokeAI/pull/1251
|
||||||
- Fix-build-container-action by @mauwii in #1274
|
- fixing aspect ratio on hires by @tjennings in
|
||||||
- handle all unicode characters by @damian0815 in #1276
|
https://github.com/invoke-ai/InvokeAI/pull/1249
|
||||||
- adds models.user.yml to .gitignore by @JakeHL in #1281
|
- Fix-build-container-action by @mauwii in
|
||||||
- remove debug branch, set fail-fast to false by @mauwii in #1284
|
https://github.com/invoke-ai/InvokeAI/pull/1274
|
||||||
- Protect-secrets-on-pr by @mauwii in #1285
|
- handle all unicode characters by @damian0815 in
|
||||||
- Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
|
https://github.com/invoke-ai/InvokeAI/pull/1276
|
||||||
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
|
- adds models.user.yml to .gitignore by @JakeHL in
|
||||||
- Use proper authentication to download model by @mauwii in #1287
|
https://github.com/invoke-ai/InvokeAI/pull/1281
|
||||||
- Prevent indexing error for mode RGB by @spezialspezial in #1294
|
- remove debug branch, set fail-fast to false by @mauwii in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1284
|
||||||
|
- Protect-secrets-on-pr by @mauwii in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1285
|
||||||
|
- Web UI: Adds initial inpainting implementation by @psychedelicious in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1225
|
||||||
|
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1289
|
||||||
|
- Use proper authentication to download model by @mauwii in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1287
|
||||||
|
- Prevent indexing error for mode RGB by @spezialspezial in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1294
|
||||||
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
|
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
|
||||||
unecesarry caches by @mauwii in #1293
|
unecesarry caches by @mauwii in
|
||||||
- add --no-interactive to configure_invokeai step by @mauwii in #1302
|
https://github.com/invoke-ai/InvokeAI/pull/1293
|
||||||
|
- add --no-interactive to preload_models step by @mauwii in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1302
|
||||||
- 1-click installer and updater. Uses micromamba to install git and conda into a
|
- 1-click installer and updater. Uses micromamba to install git and conda into a
|
||||||
contained environment (if necessary) before running the normal installation
|
contained environment (if necessary) before running the normal installation
|
||||||
script by @cmdr2 in #1253
|
script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
|
||||||
- configure_invokeai.py script downloads the weight files by @lstein in #1290
|
- preload_models.py script downloads the weight files by @lstein in
|
||||||
|
https://github.com/invoke-ai/InvokeAI/pull/1290
|
||||||
|
|
||||||
## v2.0.1 <small>(13 October 2022)</small>
|
## v2.0.1 <small>(13 October 2022)</small>
|
||||||
|
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 359 KiB |
|
Before Width: | Height: | Size: 528 KiB |
|
Before Width: | Height: | Size: 601 KiB |
|
Before Width: | Height: | Size: 59 KiB |
|
Before Width: | Height: | Size: 122 KiB |
|
Before Width: | Height: | Size: 128 KiB |
|
Before Width: | Height: | Size: 99 KiB |
|
Before Width: | Height: | Size: 112 KiB |
|
Before Width: | Height: | Size: 107 KiB |
|
Before Width: | Height: | Size: 169 KiB |
@@ -1,5 +1,5 @@
|
|||||||
---
|
---
|
||||||
title: Command-Line Interface
|
title: CLI
|
||||||
---
|
---
|
||||||
|
|
||||||
# :material-bash: CLI
|
# :material-bash: CLI
|
||||||
@@ -99,7 +99,8 @@ overridden on a per-prompt basis (see
|
|||||||
| `--sampler <sampler>` | `-A<sampler>` | `k_lms` | Sampler to use. Use `-h` to get list of available samplers. |
|
| `--sampler <sampler>` | `-A<sampler>` | `k_lms` | Sampler to use. Use `-h` to get list of available samplers. |
|
||||||
| `--seamless` | | `False` | Create interesting effects by tiling elements of the image. |
|
| `--seamless` | | `False` | Create interesting effects by tiling elements of the image. |
|
||||||
| `--embedding_path <path>` | | `None` | Path to pre-trained embedding manager checkpoints, for custom models |
|
| `--embedding_path <path>` | | `None` | Path to pre-trained embedding manager checkpoints, for custom models |
|
||||||
| `--gfpgan_model_path` | | `experiments/pretrained_models/GFPGANv1.4.pth` | Path to GFPGAN model file. |
|
| `--gfpgan_dir` | | `src/gfpgan` | Path to where GFPGAN is installed. |
|
||||||
|
| `--gfpgan_model_path` | | `experiments/pretrained_models/GFPGANv1.4.pth` | Path to GFPGAN model file, relative to `--gfpgan_dir`. |
|
||||||
| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions |
|
| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions |
|
||||||
| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast |
|
| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast |
|
||||||
|
|
||||||
@@ -130,34 +131,20 @@ file should contain the startup options as you would type them on the
|
|||||||
command line (`--steps=10 --grid`), one argument per line, or a
|
command line (`--steps=10 --grid`), one argument per line, or a
|
||||||
mixture of both using any of the accepted command switch formats:
|
mixture of both using any of the accepted command switch formats:
|
||||||
|
|
||||||
!!! example "my unmodified initialization file"
|
!!! example ""
|
||||||
|
|
||||||
```bash title="~/.invokeai" linenums="1"
|
```bash
|
||||||
# InvokeAI initialization file
|
--web
|
||||||
# This is the InvokeAI initialization file, which contains command-line default values.
|
--steps=28
|
||||||
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
|
--grid
|
||||||
# or renaming it and then running configure_invokeai.py again.
|
-f 0.6 -C 11.0 -A k_euler_a
|
||||||
|
|
||||||
# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
|
|
||||||
--root="/Users/mauwii/invokeai"
|
|
||||||
|
|
||||||
# the --outdir option controls the default location of image files.
|
|
||||||
--outdir="/Users/mauwii/invokeai/outputs"
|
|
||||||
|
|
||||||
# You may place other frequently-used startup commands here, one or more per line.
|
|
||||||
# Examples:
|
|
||||||
# --web --host=0.0.0.0
|
|
||||||
# --steps=20
|
|
||||||
# -Ak_euler_a -C10.0
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! note
|
Note that the initialization file only accepts the command line arguments.
|
||||||
|
There are additional arguments that you can provide on the `invoke>` command
|
||||||
The initialization file only accepts the command line arguments.
|
line (such as `-n` or `--iterations`) that cannot be entered into this file.
|
||||||
There are additional arguments that you can provide on the `invoke>` command
|
Also be alert for empty blank lines at the end of the file, which will cause
|
||||||
line (such as `-n` or `--iterations`) that cannot be entered into this file.
|
an arguments error at startup time.
|
||||||
Also be alert for empty blank lines at the end of the file, which will cause
|
|
||||||
an arguments error at startup time.
|
|
||||||
|
|
||||||
## List of prompt arguments
|
## List of prompt arguments
|
||||||
|
|
||||||
@@ -209,17 +196,15 @@ Here are the invoke> command that apply to txt2img:
|
|||||||
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
|
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
|
||||||
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
|
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
|
||||||
|
|
||||||
!!! note
|
Note that the width and height of the image must be multiples of 64. You can
|
||||||
|
provide different values, but they will be rounded down to the nearest multiple
|
||||||
|
of 64.
|
||||||
|
|
||||||
the width and height of the image must be multiples of 64. You can
|
### This is an example of img2img:
|
||||||
provide different values, but they will be rounded down to the nearest multiple
|
|
||||||
of 64.
|
|
||||||
|
|
||||||
!!! example "This is a example of img2img"
|
```
|
||||||
|
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
|
||||||
```bash
|
```
|
||||||
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
|
|
||||||
```
|
|
||||||
|
|
||||||
This will modify the indicated vacation photograph by making it more like the
|
This will modify the indicated vacation photograph by making it more like the
|
||||||
prompt. Results will vary greatly depending on what is in the image. We also ask
|
prompt. Results will vary greatly depending on what is in the image. We also ask
|
||||||
@@ -269,7 +254,7 @@ description of the part of the image to replace. For example, if you have an
|
|||||||
image of a breakfast plate with a bagel, toast and scrambled eggs, you can
|
image of a breakfast plate with a bagel, toast and scrambled eggs, you can
|
||||||
selectively mask the bagel and replace it with a piece of cake this way:
|
selectively mask the bagel and replace it with a piece of cake this way:
|
||||||
|
|
||||||
```bash
|
```
|
||||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
|
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -281,26 +266,20 @@ are getting too much or too little masking you can adjust the threshold down (to
|
|||||||
get more mask), or up (to get less). In this example, by passing `-tm` a higher
|
get more mask), or up (to get less). In this example, by passing `-tm` a higher
|
||||||
value, we are insisting on a more stringent classification.
|
value, we are insisting on a more stringent classification.
|
||||||
|
|
||||||
```bash
|
```
|
||||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
|
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
|
||||||
```
|
```
|
||||||
|
|
||||||
### Custom Styles and Subjects
|
# Other Commands
|
||||||
|
|
||||||
You can load and use hundreds of community-contributed Textual
|
|
||||||
Inversion models just by typing the appropriate trigger phrase. Please
|
|
||||||
see [Concepts Library](CONCEPTS.md) for more details.
|
|
||||||
|
|
||||||
## Other Commands
|
|
||||||
|
|
||||||
The CLI offers a number of commands that begin with "!".
|
The CLI offers a number of commands that begin with "!".
|
||||||
|
|
||||||
### Postprocessing images
|
## Postprocessing images
|
||||||
|
|
||||||
To postprocess a file using face restoration or upscaling, use the `!fix`
|
To postprocess a file using face restoration or upscaling, use the `!fix`
|
||||||
command.
|
command.
|
||||||
|
|
||||||
#### `!fix`
|
### `!fix`
|
||||||
|
|
||||||
This command runs a post-processor on a previously-generated image. It takes a
|
This command runs a post-processor on a previously-generated image. It takes a
|
||||||
PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`
|
PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`
|
||||||
@@ -327,19 +306,19 @@ Some examples:
|
|||||||
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
|
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `!mask`
|
### !mask
|
||||||
|
|
||||||
This command takes an image, a text prompt, and uses the `clipseg` algorithm to
|
This command takes an image, a text prompt, and uses the `clipseg` algorithm to
|
||||||
automatically generate a mask of the area that matches the text prompt. It is
|
automatically generate a mask of the area that matches the text prompt. It is
|
||||||
useful for debugging the text masking process prior to inpainting with the
|
useful for debugging the text masking process prior to inpainting with the
|
||||||
`--text_mask` argument. See [INPAINTING.md] for details.
|
`--text_mask` argument. See [INPAINTING.md] for details.
|
||||||
|
|
||||||
### Model selection and importation
|
## Model selection and importation
|
||||||
|
|
||||||
The CLI allows you to add new models on the fly, as well as to switch among them
|
The CLI allows you to add new models on the fly, as well as to switch among them
|
||||||
rapidly without leaving the script.
|
rapidly without leaving the script.
|
||||||
|
|
||||||
#### `!models`
|
### !models
|
||||||
|
|
||||||
This prints out a list of the models defined in `config/models.yaml'. The active
|
This prints out a list of the models defined in `config/models.yaml'. The active
|
||||||
model is bold-faced
|
model is bold-faced
|
||||||
@@ -352,7 +331,7 @@ laion400m not loaded <no description>
|
|||||||
waifu-diffusion not loaded Waifu Diffusion v1.3
|
waifu-diffusion not loaded Waifu Diffusion v1.3
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
#### `!switch <model>`
|
### !switch <model>
|
||||||
|
|
||||||
This quickly switches from one model to another without leaving the CLI script.
|
This quickly switches from one model to another without leaving the CLI script.
|
||||||
`invoke.py` uses a memory caching system; once a model has been loaded,
|
`invoke.py` uses a memory caching system; once a model has been loaded,
|
||||||
@@ -377,7 +356,7 @@ invoke> !switch waifu-diffusion
|
|||||||
| Making attention of type 'vanilla' with 512 in_channels
|
| Making attention of type 'vanilla' with 512 in_channels
|
||||||
| Using faster float16 precision
|
| Using faster float16 precision
|
||||||
>> Model loaded in 18.24s
|
>> Model loaded in 18.24s
|
||||||
>> Max VRAM used to load the model: 2.17G
|
>> Max VRAM used to load the model: 2.17G
|
||||||
>> Current VRAM usage:2.17G
|
>> Current VRAM usage:2.17G
|
||||||
>> Setting Sampler to k_lms
|
>> Setting Sampler to k_lms
|
||||||
|
|
||||||
@@ -397,7 +376,7 @@ laion400m not loaded <no description>
|
|||||||
waifu-diffusion cached Waifu Diffusion v1.3
|
waifu-diffusion cached Waifu Diffusion v1.3
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
#### `!import_model <path/to/model/weights>`
|
### !import_model <path/to/model/weights>
|
||||||
|
|
||||||
This command imports a new model weights file into InvokeAI, makes it available
|
This command imports a new model weights file into InvokeAI, makes it available
|
||||||
for image generation within the script, and writes out the configuration for the
|
for image generation within the script, and writes out the configuration for the
|
||||||
@@ -444,10 +423,10 @@ OK to import [n]? <b>y</b>
|
|||||||
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
|
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
| Making attention of type 'vanilla' with 512 in_channels
|
||||||
| Using faster float16 precision
|
| Using faster float16 precision
|
||||||
invoke>
|
invoke>
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
#### `!edit_model <name_of_model>`
|
###!edit_model <name_of_model>
|
||||||
|
|
||||||
The `!edit_model` command can be used to modify a model that is already defined
|
The `!edit_model` command can be used to modify a model that is already defined
|
||||||
in `config/models.yaml`. Call it with the short name of the model you wish to
|
in `config/models.yaml`. Call it with the short name of the model you wish to
|
||||||
@@ -484,12 +463,12 @@ text... Outputs: [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix
|
|||||||
"outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
|
"outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
|
||||||
-H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```
|
-H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```
|
||||||
|
|
||||||
### History processing
|
## History processing
|
||||||
|
|
||||||
The CLI provides a series of convenient commands for reviewing previous actions,
|
The CLI provides a series of convenient commands for reviewing previous actions,
|
||||||
retrieving them, modifying them, and re-running them.
|
retrieving them, modifying them, and re-running them.
|
||||||
|
|
||||||
#### `!history`
|
### !history
|
||||||
|
|
||||||
The invoke script keeps track of all the commands you issue during a session,
|
The invoke script keeps track of all the commands you issue during a session,
|
||||||
allowing you to re-run them. On Mac and Linux systems, it also writes the
|
allowing you to re-run them. On Mac and Linux systems, it also writes the
|
||||||
@@ -501,22 +480,20 @@ during the session (Windows), or the most recent 1000 commands (Mac|Linux). You
|
|||||||
can then repeat a command by using the command `!NNN`, where "NNN" is the
|
can then repeat a command by using the command `!NNN`, where "NNN" is the
|
||||||
history line number. For example:
|
history line number. For example:
|
||||||
|
|
||||||
!!! example ""
|
```bash
|
||||||
|
invoke> !history
|
||||||
|
...
|
||||||
|
[14] happy woman sitting under tree wearing broad hat and flowing garment
|
||||||
|
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
|
||||||
|
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
|
||||||
|
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||||
|
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||||
|
...
|
||||||
|
invoke> !20
|
||||||
|
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||||
|
```
|
||||||
|
|
||||||
```bash
|
### !fetch
|
||||||
invoke> !history
|
|
||||||
...
|
|
||||||
[14] happy woman sitting under tree wearing broad hat and flowing garment
|
|
||||||
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
|
|
||||||
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
|
|
||||||
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
|
||||||
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
|
||||||
...
|
|
||||||
invoke> !20
|
|
||||||
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
|
||||||
```
|
|
||||||
|
|
||||||
####`!fetch`
|
|
||||||
|
|
||||||
This command retrieves the generation parameters from a previously generated
|
This command retrieves the generation parameters from a previously generated
|
||||||
image and either loads them into the command line (Linux|Mac), or prints them
|
image and either loads them into the command line (Linux|Mac), or prints them
|
||||||
@@ -526,36 +503,33 @@ a folder with image png files, and wildcard \*.png to retrieve the dream command
|
|||||||
used to generate the images, and save them to a file commands.txt for further
|
used to generate the images, and save them to a file commands.txt for further
|
||||||
processing.
|
processing.
|
||||||
|
|
||||||
!!! example "load the generation command for a single png file"
|
This example loads the generation command for a single png file:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke> !fetch 0000015.8929913.png
|
invoke> !fetch 0000015.8929913.png
|
||||||
# the script returns the next line, ready for editing and running:
|
# the script returns the next line, ready for editing and running:
|
||||||
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! example "fetch the generation commands from a batch of files and store them into `selected.txt`"
|
This one fetches the generation commands from a batch of files and stores them
|
||||||
|
into `selected.txt`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke> !fetch outputs\selected-imgs\*.png selected.txt
|
invoke> !fetch outputs\selected-imgs\*.png selected.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `!replay`
|
### !replay
|
||||||
|
|
||||||
This command replays a text file generated by !fetch or created manually
|
This command replays a text file generated by !fetch or created manually
|
||||||
|
|
||||||
!!! example
|
```
|
||||||
|
invoke> !replay outputs\selected-imgs\selected.txt
|
||||||
|
```
|
||||||
|
|
||||||
```bash
|
Note that these commands may behave unexpectedly if given a PNG file that was
|
||||||
invoke> !replay outputs\selected-imgs\selected.txt
|
not generated by InvokeAI.
|
||||||
```
|
|
||||||
|
|
||||||
!!! note
|
### !search <search string>
|
||||||
|
|
||||||
These commands may behave unexpectedly if given a PNG file that was
|
|
||||||
not generated by InvokeAI.
|
|
||||||
|
|
||||||
#### `!search <search string>`
|
|
||||||
|
|
||||||
This is similar to !history but it only returns lines that contain
|
This is similar to !history but it only returns lines that contain
|
||||||
`search string`. For example:
|
`search string`. For example:
|
||||||
@@ -565,7 +539,7 @@ invoke> !search surreal
|
|||||||
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `!clear`
|
### `!clear`
|
||||||
|
|
||||||
This clears the search history from memory and disk. Be advised that this
|
This clears the search history from memory and disk. Be advised that this
|
||||||
operation is irreversible and does not issue any warnings!
|
operation is irreversible and does not issue any warnings!
|
||||||
|
|||||||
@@ -1,131 +0,0 @@
|
|||||||
---
|
|
||||||
title: Concepts Library
|
|
||||||
---
|
|
||||||
|
|
||||||
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
|
|
||||||
|
|
||||||
## Using Textual Inversion Files
|
|
||||||
|
|
||||||
Textual inversion (TI) files are small models that customize the output of
|
|
||||||
Stable Diffusion image generation. They can augment SD with specialized subjects
|
|
||||||
and artistic styles. They are also known as "embeds" in the machine learning
|
|
||||||
world.
|
|
||||||
|
|
||||||
Each TI file introduces one or more vocabulary terms to the SD model. These are
|
|
||||||
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
|
|
||||||
using angle brackets as in "<trigger-phrase>". The two most common type of
|
|
||||||
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
|
|
||||||
different TI training packages. InvokeAI supports both formats, but its
|
|
||||||
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.
|
|
||||||
|
|
||||||
The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
|
|
||||||
amassed a large ligrary of >800 community-contributed TI files covering a
|
|
||||||
broad range of subjects and styles. InvokeAI has built-in support for this
|
|
||||||
library which downloads and merges TI files automatically upon request. You can
|
|
||||||
also install your own or others' TI files by placing them in a designated
|
|
||||||
directory.
|
|
||||||
|
|
||||||
### An Example
|
|
||||||
|
|
||||||
Here are a few examples to illustrate how it works. All these images were
|
|
||||||
generated using the command-line client and the Stable Diffusion 1.5 model:
|
|
||||||
|
|
||||||
| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
|
|
||||||
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|
|
||||||
|  |  |  |  |
|
|
||||||
|
|
||||||
You can also combine styles and concepts:
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
| A portrait of <alf> in <cartoona-animal> style |
|
|
||||||
| :--------------------------------------------------------: |
|
|
||||||
|  |
|
|
||||||
</figure>
|
|
||||||
## Using a Hugging Face Concept
|
|
||||||
|
|
||||||
!!! warning "Authenticating to HuggingFace"
|
|
||||||
|
|
||||||
Some concepts require valid authentication to HuggingFace. Without it, they will not be downloaded
|
|
||||||
and will be silently ignored.
|
|
||||||
|
|
||||||
If you used an installer to install InvokeAI, you may have already set a HuggingFace token.
|
|
||||||
If you skipped this step, you can:
|
|
||||||
|
|
||||||
- run the InvokeAI configuration script again (if you used a manual installer): `scripts/configure_invokeai.py`
|
|
||||||
- set one of the `HUGGINGFACE_TOKEN` or `HUGGING_FACE_HUB_TOKEN` environment variables to contain your token
|
|
||||||
|
|
||||||
Finally, if you already used any HuggingFace library on your computer, you might already have a token
|
|
||||||
in your local cache. Check for a hidden `.huggingface` directory in your home folder. If it
|
|
||||||
contains a `token` file, then you are all set.
|
|
||||||
|
|
||||||
|
|
||||||
Hugging Face TI concepts are downloaded and installed automatically as you
|
|
||||||
require them. This requires your machine to be connected to the Internet. To
|
|
||||||
find out what each concept is for, you can browse the
|
|
||||||
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
|
|
||||||
look at examples of what each concept produces.
|
|
||||||
|
|
||||||
When you have an idea of a concept you wish to try, go to the command-line
|
|
||||||
client (CLI) and type a `<` character and the beginning of the Hugging Face
|
|
||||||
concept name you wish to load. Press ++tab++, and the CLI will show you all
|
|
||||||
matching concepts. You can also type `<` and hit ++tab++ to get a listing of all
|
|
||||||
~800 concepts, but be prepared to scroll up to see them all! If there is more
|
|
||||||
than one match you can continue to type and ++tab++ until the concept is
|
|
||||||
completed.
|
|
||||||
|
|
||||||
!!! example
|
|
||||||
|
|
||||||
if you type in `<x` and hit ++tab++, you'll be prompted with the completions:
|
|
||||||
|
|
||||||
```py
|
|
||||||
<xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
|
|
||||||
```
|
|
||||||
|
|
||||||
Now type `id` and press ++tab++. It will be autocompleted to `<xidiversity>`
|
|
||||||
because this is a unique match.
|
|
||||||
|
|
||||||
Finish your prompt and generate as usual. You may include multiple concept terms
|
|
||||||
in the prompt.
|
|
||||||
|
|
||||||
If you have never used this concept before, you will see a message that the TI
|
|
||||||
model is being downloaded and installed. After this, the concept will be saved
|
|
||||||
locally (in the `models/sd-concepts-library` directory) for future use.
|
|
||||||
|
|
||||||
Several steps happen during downloading and installation, including a scan of
|
|
||||||
the file for malicious code. Should any errors occur, you will be warned and the
|
|
||||||
concept will fail to load. Generation will then continue treating the trigger
|
|
||||||
term as a normal string of characters (e.g. as literal `<ghibli-face>`).
|
|
||||||
|
|
||||||
You can also use `<concept-names>` in the WebGUI's prompt textbox. There is no
|
|
||||||
autocompletion at this time.
|
|
||||||
|
|
||||||
## Installing your Own TI Files
|
|
||||||
|
|
||||||
You may install any number of `.pt` and `.bin` files simply by copying them into
|
|
||||||
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
|
|
||||||
in your home directory). You may create subdirectories in order to organize the
|
|
||||||
files in any way you wish. Be careful not to overwrite one file with another.
|
|
||||||
For example, TI files generated by the Hugging Face toolkit share the named
|
|
||||||
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
|
|
||||||
|
|
||||||
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
|
|
||||||
files it finds there. At startup you will see a message similar to this one:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
|
|
||||||
```
|
|
||||||
|
|
||||||
Note the `*` trigger term. This is a placeholder term that many early TI
|
|
||||||
tutorials taught people to use rather than a more descriptive term.
|
|
||||||
Unfortunately, if you have multiple TI files that all use this term, only the
|
|
||||||
first one loaded will be triggered by use of the term.
|
|
||||||
|
|
||||||
To avoid this problem, you can use the `merge_embeddings.py` script to merge two
|
|
||||||
or more TI files together. If it encounters a collision of terms, the script
|
|
||||||
will prompt you to select new terms that do not collide. See
|
|
||||||
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
|
|
||||||
|
|
||||||
## Further Reading
|
|
||||||
|
|
||||||
Please see [the repository](https://github.com/rinongal/textual_inversion) and
|
|
||||||
associated paper for details and limitations.
|
|
||||||
@@ -85,7 +85,7 @@ increasing size, every tile after the first in a row or column
|
|||||||
effectively only covers an extra `1 - overlap_ratio` on each axis. If
|
effectively only covers an extra `1 - overlap_ratio` on each axis. If
|
||||||
the input/`--init_img` is same size as a tile, the ideal (for time)
|
the input/`--init_img` is same size as a tile, the ideal (for time)
|
||||||
scaling factors with the default overlap (0.25) are 1.75, 2.5, 3.25,
|
scaling factors with the default overlap (0.25) are 1.75, 2.5, 3.25,
|
||||||
4.0, etc.
|
4.0 etc..
|
||||||
|
|
||||||
`-embiggen_tiles <spaced list of tiles>`
|
`-embiggen_tiles <spaced list of tiles>`
|
||||||
|
|
||||||
@@ -100,15 +100,6 @@ Tiles are numbered starting with one, and left-to-right,
|
|||||||
top-to-bottom. So, if you are generating a 3x3 tiled image, the
|
top-to-bottom. So, if you are generating a 3x3 tiled image, the
|
||||||
middle row would be `4 5 6`.
|
middle row would be `4 5 6`.
|
||||||
|
|
||||||
`-embiggen_strength <strength>`
|
|
||||||
|
|
||||||
Another advanced option if you want to experiment with the strength parameter
|
|
||||||
that embiggen uses when it calls Img2Img. Values range from 0.0 to 1.0
|
|
||||||
and lower values preserve more of the character of the initial image.
|
|
||||||
Values that are too high will result in a completely different end image,
|
|
||||||
while values that are too low will result in an image not dissimilar to one
|
|
||||||
you would get with ESRGAN upscaling alone. The default value is 0.4.
|
|
||||||
|
|
||||||
### Examples
|
### Examples
|
||||||
|
|
||||||
!!! example ""
|
!!! example ""
|
||||||
|
|||||||
@@ -12,19 +12,21 @@ stable diffusion to build the prompt on top of the image you provide, preserving
|
|||||||
the original's basic shape and layout. To use it, provide the `--init_img`
|
the original's basic shape and layout. To use it, provide the `--init_img`
|
||||||
option as shown here:
|
option as shown here:
|
||||||
|
|
||||||
!!! example ""
|
```commandline
|
||||||
|
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
|
||||||
|
```
|
||||||
|
|
||||||
```commandline
|
This will take the original image shown here:
|
||||||
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
|
|
||||||
```
|
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
{ width=320 }
|
||||||
|
</figure>
|
||||||
|
|
||||||
| original image | generated image |
|
and generate a new image based on it as shown here:
|
||||||
| :------------: | :-------------: |
|
|
||||||
| { width=320 } | { width=320 } |
|
|
||||||
|
|
||||||
</figure>
|
<figure markdown>
|
||||||
|
{ width=320 }
|
||||||
|
</figure>
|
||||||
|
|
||||||
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
|
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
|
||||||
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep
|
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep
|
||||||
@@ -86,15 +88,13 @@ from a prompt. If the step count is 10, then the "latent space" (Stable
|
|||||||
Diffusion's internal representation of the image) for the prompt "fire" with
|
Diffusion's internal representation of the image) for the prompt "fire" with
|
||||||
seed `1592514025` develops something like this:
|
seed `1592514025` develops something like this:
|
||||||
|
|
||||||
!!! example ""
|
```bash
|
||||||
|
invoke> "fire" -s10 -W384 -H384 -S1592514025
|
||||||
|
```
|
||||||
|
|
||||||
```bash
|
<figure markdown>
|
||||||
invoke> "fire" -s10 -W384 -H384 -S1592514025
|

|
||||||
```
|
</figure>
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
{ width=720 }
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Put simply: starting from a frame of fuzz/static, SD finds details in each frame
|
Put simply: starting from a frame of fuzz/static, SD finds details in each frame
|
||||||
that it thinks look like "fire" and brings them a little bit more into focus,
|
that it thinks look like "fire" and brings them a little bit more into focus,
|
||||||
@@ -109,23 +109,25 @@ into the sequence at the appropriate point, with just the right amount of noise.
|
|||||||
|
|
||||||
### A concrete example
|
### A concrete example
|
||||||
|
|
||||||
!!! example "I want SD to draw a fire based on this hand-drawn image"
|
I want SD to draw a fire based on this hand-drawn image:
|
||||||
|
|
||||||
{ align=left }
|
<figure markdown>
|
||||||
|

|
||||||
|
</figure>
|
||||||
|
|
||||||
Let's only do 10 steps, to make it easier to see what's happening. If strength
|
Let's only do 10 steps, to make it easier to see what's happening. If strength
|
||||||
is `0.7`, this is what the internal steps the algorithm has to take will look
|
is `0.7`, this is what the internal steps the algorithm has to take will look
|
||||||
like:
|
like:
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||

|

|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
With strength `0.4`, the steps look more like this:
|
With strength `0.4`, the steps look more like this:
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||

|

|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
Notice how much more fuzzy the starting image is for strength `0.7` compared to
|
Notice how much more fuzzy the starting image is for strength `0.7` compared to
|
||||||
`0.4`, and notice also how much longer the sequence is with `0.7`:
|
`0.4`, and notice also how much longer the sequence is with `0.7`:
|
||||||
|
|||||||
@@ -158,7 +158,7 @@ when filling in missing regions. It has an almost uncanny ability to blend the
|
|||||||
new regions with existing ones in a semantically coherent way.
|
new regions with existing ones in a semantically coherent way.
|
||||||
|
|
||||||
To install the inpainting model, follow the
|
To install the inpainting model, follow the
|
||||||
[instructions](../installation/050_INSTALLING_MODELS.md) for installing a new model.
|
[instructions](../installation/INSTALLING_MODELS.md) for installing a new model.
|
||||||
You may use either the CLI (`invoke.py` script) or directly edit the
|
You may use either the CLI (`invoke.py` script) or directly edit the
|
||||||
`configs/models.yaml` configuration file to do this. The main thing to watch out
|
`configs/models.yaml` configuration file to do this. The main thing to watch out
|
||||||
for is that the model `config` option must be set up to use
|
for is that the model `config` option must be set up to use
|
||||||
|
|||||||
@@ -1,89 +0,0 @@
|
|||||||
---
|
|
||||||
title: The NSFW Checker
|
|
||||||
---
|
|
||||||
|
|
||||||
# :material-image-off: NSFW Checker
|
|
||||||
|
|
||||||
## The NSFW ("Safety") Checker
|
|
||||||
|
|
||||||
The Stable Diffusion image generation models will produce sexual
|
|
||||||
imagery if deliberately prompted, and will occasionally produce such
|
|
||||||
images when this is not intended. Such images are colloquially known
|
|
||||||
as "Not Safe for Work" (NSFW). This behavior is due to the nature of
|
|
||||||
the training set that Stable Diffusion was trained on, which culled
|
|
||||||
millions of "aesthetic" images from the Internet.
|
|
||||||
|
|
||||||
You may not wish to be exposed to these images, and in some
|
|
||||||
jurisdictions it may be illegal to publicly distribute such imagery,
|
|
||||||
including mounting a publicly-available server that provides
|
|
||||||
unfiltered images to the public. Furthermore, the [Stable Diffusion
|
|
||||||
weights
|
|
||||||
License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-ModelWeights.txt)
|
|
||||||
forbids the model from being used to "exploit any of the
|
|
||||||
vulnerabilities of a specific group of persons."
|
|
||||||
|
|
||||||
For these reasons Stable Diffusion offers a "safety checker," a
|
|
||||||
machine learning model trained to recognize potentially disturbing
|
|
||||||
imagery. When a potentially NSFW image is detected, the checker will
|
|
||||||
blur the image and paste a warning icon on top. The checker can be
|
|
||||||
turned on and off on the command line using `--nsfw_checker` and
|
|
||||||
`--no-nsfw_checker`.
|
|
||||||
|
|
||||||
At installation time, InvokeAI will ask whether the checker should be
|
|
||||||
activated by default (neither argument given on the command line). The
|
|
||||||
response is stored in the InvokeAI initialization file (usually
|
|
||||||
`.invokeai` in your home directory). You can change the default at any
|
|
||||||
time by opening this file in a text editor and commenting or
|
|
||||||
uncommenting the line `--nsfw_checker`.
|
|
||||||
|
|
||||||
## Caveats
|
|
||||||
|
|
||||||
There are a number of caveats that you need to be aware of.
|
|
||||||
|
|
||||||
### Accuracy
|
|
||||||
|
|
||||||
The checker is [not perfect](https://arxiv.org/abs/2210.04610). It will
|
|
||||||
occasionally flag innocuous images (false positives), and will
|
|
||||||
frequently miss violent and gory imagery (false negatives). It rarely
|
|
||||||
fails to flag sexual imagery, but this has been known to happen. For
|
|
||||||
these reasons, the InvokeAI team prefers to refer to the software as a
|
|
||||||
"NSFW Checker" rather than "safety checker."
|
|
||||||
|
|
||||||
### Memory Usage and Performance
|
|
||||||
|
|
||||||
The NSFW checker consumes an additional 1.2G of GPU VRAM on top of the
|
|
||||||
3.4G of VRAM used by Stable Diffusion v1.5 (this is with
|
|
||||||
half-precision arithmetic). This means that the checker will not run
|
|
||||||
successfully on GPU cards with less than 6GB VRAM, and will reduce the
|
|
||||||
size of the images that you can produce.
|
|
||||||
|
|
||||||
The checker also introduces a slight performance penalty. Images will
|
|
||||||
take ~1 second longer to generate when the checker is
|
|
||||||
activated. Generally this is not noticeable.
|
|
||||||
|
|
||||||
### Intermediate Images in the Web UI
|
|
||||||
|
|
||||||
The checker only operates on the final image produced by the Stable
|
|
||||||
Diffusion algorithm. If you are using the Web UI and have enabled the
|
|
||||||
display of intermediate images, you will briefly be exposed to a
|
|
||||||
low-resolution (mosaicized) version of the final image before it is
|
|
||||||
flagged by the checker and replaced by a fully blurred version. You
|
|
||||||
are encouraged to turn **off** intermediate image rendering when you
|
|
||||||
are using the checker. Future versions of InvokeAI will apply
|
|
||||||
additional blurring to intermediate images when the checker is active.
|
|
||||||
|
|
||||||
### Watermarking
|
|
||||||
|
|
||||||
InvokeAI does not apply any sort of watermark to images it
|
|
||||||
generates. However, it does write metadata into the PNG data area,
|
|
||||||
including the prompt used to generate the image and relevant parameter
|
|
||||||
settings. These fields can be examined using the `sd-metadata.py`
|
|
||||||
script that comes with the InvokeAI package.
|
|
||||||
|
|
||||||
Note that several other Stable Diffusion distributions offer
|
|
||||||
wavelet-based "invisible" watermarking. We have experimented with the
|
|
||||||
library used to generate these watermarks and have reached the
|
|
||||||
conclusion that while the watermarking library may be adding
|
|
||||||
watermarks to PNG images, the currently available version is unable to
|
|
||||||
retrieve them successfully. If and when a functioning version of the
|
|
||||||
library becomes available, we will offer this feature as well.
|
|
||||||
@@ -133,6 +133,29 @@ outputs = g.txt2img("a unicorn in manhattan")
|
|||||||
|
|
||||||
Outputs is a list of lists in the format [[filename1,seed1],[filename2,seed2],...].
|
Outputs is a list of lists in the format [[filename1,seed1],[filename2,seed2],...].
|
||||||
|
|
||||||
Please see the documentation in ldm/generate.py for more information.
|
Please see ldm/generate.py for more information. A set of example scripts is coming RSN.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## **Preload Models**
|
||||||
|
|
||||||
|
In situations where you have limited internet connectivity or are blocked behind a firewall, you can
|
||||||
|
use the preload script to preload the required files for Stable Diffusion to run.
|
||||||
|
|
||||||
|
The preload script `scripts/preload_models.py` needs to be run once at least while connected to the
|
||||||
|
internet. In the following runs, it will load up the cached versions of the required files from the
|
||||||
|
`.cache` directory of the system.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
(invokeai) ~/stable-diffusion$ python3 ./scripts/preload_models.py
|
||||||
|
preloading bert tokenizer...
|
||||||
|
Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s]
|
||||||
|
Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s]
|
||||||
|
Downloading: 100%|██████████████████████████████████| 455k/455k [00:00<00:00, 4.36MB/s]
|
||||||
|
Downloading: 100%|██████████████████████████████████| 570/570 [00:00<00:00, 477kB/s]
|
||||||
|
...success
|
||||||
|
preloading kornia requirements...
|
||||||
|
Downloading: "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth" to /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth
|
||||||
|
100%|███████████████████████████████████████████████| 5.10M/5.10M [00:00<00:00, 101MB/s]
|
||||||
|
...success
|
||||||
|
```
|
||||||
|
|||||||
@@ -120,7 +120,7 @@ A number of caveats:
|
|||||||
(`--iterations`) argument.
|
(`--iterations`) argument.
|
||||||
|
|
||||||
3. Your results will be _much_ better if you use the `inpaint-1.5` model
|
3. Your results will be _much_ better if you use the `inpaint-1.5` model
|
||||||
released by runwayML and installed by default by `scripts/configure_invokeai.py`.
|
released by runwayML and installed by default by `scripts/preload_models.py`.
|
||||||
This model was trained specifically to harmoniously fill in image gaps. The
|
This model was trained specifically to harmoniously fill in image gaps. The
|
||||||
standard model will work as well, but you may notice color discontinuities at
|
standard model will work as well, but you may notice color discontinuities at
|
||||||
the border.
|
the border.
|
||||||
|
|||||||
@@ -6,39 +6,53 @@ title: Postprocessing
|
|||||||
|
|
||||||
## Intro
|
## Intro
|
||||||
|
|
||||||
This extension provides the ability to restore faces and upscale images.
|
This extension provides the ability to restore faces and upscale
|
||||||
|
images.
|
||||||
|
|
||||||
Face restoration and upscaling can be applied at the time you generate the
|
Face restoration and upscaling can be applied at the time you generate
|
||||||
images, or at any later time against a previously-generated PNG file, using the
|
the images, or at any later time against a previously-generated PNG
|
||||||
[!fix](#fixing-previously-generated-images) command.
|
file, using the [!fix](#fixing-previously-generated-images)
|
||||||
[Outpainting and outcropping](OUTPAINTING.md) can only be applied after the
|
command. [Outpainting and outcropping](OUTPAINTING.md) can only be
|
||||||
fact.
|
applied after the fact.
|
||||||
|
|
||||||
## Face Fixing
|
## Face Fixing
|
||||||
|
|
||||||
The default face restoration module is GFPGAN. The default upscale is
|
The default face restoration module is GFPGAN. The default upscale is
|
||||||
Real-ESRGAN. For an alternative face restoration module, see
|
Real-ESRGAN. For an alternative face restoration module, see [CodeFormer
|
||||||
[CodeFormer Support](#codeformer-support) below.
|
Support](#codeformer-support) below.
|
||||||
|
|
||||||
As of version 1.14, environment.yaml will install the Real-ESRGAN package into
|
As of version 1.14, environment.yaml will install the Real-ESRGAN
|
||||||
the standard install location for python packages, and will put GFPGAN into a
|
package into the standard install location for python packages, and
|
||||||
subdirectory of "src" in the InvokeAI directory. Upscaling with Real-ESRGAN
|
will put GFPGAN into a subdirectory of "src" in the InvokeAI
|
||||||
should "just work" without further intervention. Simply pass the `--upscale`
|
directory. Upscaling with Real-ESRGAN should "just work" without
|
||||||
(`-U`) option on the `invoke>` command line, or indicate the desired scale on
|
further intervention. Simply pass the `--upscale` (`-U`) option on the
|
||||||
the popup in the Web GUI.
|
`invoke>` command line, or indicate the desired scale on the popup in
|
||||||
|
the Web GUI.
|
||||||
|
|
||||||
**GFPGAN** requires a series of downloadable model files to work. These are
|
**GFPGAN** requires a series of downloadable model files to
|
||||||
loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
|
work. These are loaded when you run `scripts/preload_models.py`. If
|
||||||
error, please run the following from the InvokeAI directory:
|
GFPGAN is failing with an error, please run the following from the
|
||||||
|
InvokeAI directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python scripts/configure_invokeai.py
|
python scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
If you do not run this script in advance, the GFPGAN module will attempt to
|
If you do not run this script in advance, the GFPGAN module will attempt
|
||||||
download the models files the first time you try to perform facial
|
to download the models files the first time you try to perform facial
|
||||||
reconstruction.
|
reconstruction.
|
||||||
|
|
||||||
|
Alternatively, if you have GFPGAN installed elsewhere, or if you are
|
||||||
|
using an earlier version of this package which asked you to install
|
||||||
|
GFPGAN in a sibling directory, you may use the `--gfpgan_dir` argument
|
||||||
|
with `invoke.py` to set a custom path to your GFPGAN directory. _There
|
||||||
|
are other GFPGAN related boot arguments if you wish to customize
|
||||||
|
further._
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
You will now have access to two new prompt arguments.
|
||||||
|
|
||||||
### Upscaling
|
### Upscaling
|
||||||
|
|
||||||
`-U : <upscaling_factor> <upscaling_strength>`
|
`-U : <upscaling_factor> <upscaling_strength>`
|
||||||
@@ -105,17 +119,17 @@ actions.
|
|||||||
This repo also allows you to perform face restoration using
|
This repo also allows you to perform face restoration using
|
||||||
[CodeFormer](https://github.com/sczhou/CodeFormer).
|
[CodeFormer](https://github.com/sczhou/CodeFormer).
|
||||||
|
|
||||||
In order to setup CodeFormer to work, you need to download the models like with
|
In order to setup CodeFormer to work, you need to download the models
|
||||||
GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
|
like with GFPGAN. You can do this either by running
|
||||||
downloading the
|
`preload_models.py` or by manually downloading the [model
|
||||||
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
|
file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
|
||||||
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.
|
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.
|
||||||
|
|
||||||
You can use `-ft` prompt argument to swap between CodeFormer and the default
|
You can use `-ft` prompt argument to swap between CodeFormer and the
|
||||||
GFPGAN. The above mentioned `-G` prompt argument will allow you to control the
|
default GFPGAN. The above mentioned `-G` prompt argument will allow
|
||||||
strength of the restoration effect.
|
you to control the strength of the restoration effect.
|
||||||
|
|
||||||
### CodeFormer Usage
|
### Usage
|
||||||
|
|
||||||
The following command will perform face restoration with CodeFormer instead of
|
The following command will perform face restoration with CodeFormer instead of
|
||||||
the default gfpgan.
|
the default gfpgan.
|
||||||
@@ -143,9 +157,9 @@ situations when there is very little facial data to work with.
|
|||||||
## Fixing Previously-Generated Images
|
## Fixing Previously-Generated Images
|
||||||
|
|
||||||
It is easy to apply face restoration and/or upscaling to any
|
It is easy to apply face restoration and/or upscaling to any
|
||||||
previously-generated file. Just use the syntax
|
previously-generated file. Just use the syntax `!fix path/to/file.png
|
||||||
`!fix path/to/file.png <options>`. For example, to apply GFPGAN at strength 0.8
|
<options>`. For example, to apply GFPGAN at strength 0.8 and upscale
|
||||||
and upscale 2X for a file named `./outputs/img-samples/000044.2945021133.png`,
|
2X for a file named `./outputs/img-samples/000044.2945021133.png`,
|
||||||
just run:
|
just run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -156,7 +170,7 @@ A new file named `000044.2945021133.fixed.png` will be created in the output
|
|||||||
directory. Note that the `!fix` command does not replace the original file,
|
directory. Note that the `!fix` command does not replace the original file,
|
||||||
unlike the behavior at generate time.
|
unlike the behavior at generate time.
|
||||||
|
|
||||||
## How to disable
|
### Disabling
|
||||||
|
|
||||||
If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
|
If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
|
||||||
you can disable them on the invoke.py command line with the `--no_restore` and
|
you can disable them on the invoke.py command line with the `--no_restore` and
|
||||||
|
|||||||
@@ -20,55 +20,16 @@ would type at the invoke> prompt:
|
|||||||
Then pass this file's name to `invoke.py` when you invoke it:
|
Then pass this file's name to `invoke.py` when you invoke it:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python scripts/invoke.py --from_file "/path/to/prompts.txt"
|
(invokeai) ~/stable-diffusion$ python3 scripts/invoke.py --from_file "path/to/prompts.txt"
|
||||||
```
|
```
|
||||||
|
|
||||||
You may also read a series of prompts from standard input by providing
|
You may read a series of prompts from standard input by providing a filename of
|
||||||
a filename of `-`. For example, here is a python script that creates a
|
`-`:
|
||||||
matrix of prompts, each one varying slightly:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
#!/usr/bin/env python
|
(invokeai) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/invoke.py --from_file -
|
||||||
|
|
||||||
adjectives = ['sunny','rainy','overcast']
|
|
||||||
samplers = ['k_lms','k_euler_a','k_heun']
|
|
||||||
cfg = [7.5, 9, 11]
|
|
||||||
|
|
||||||
for adj in adjectives:
|
|
||||||
for samp in samplers:
|
|
||||||
for cg in cfg:
|
|
||||||
print(f'a {adj} day -A{samp} -C{cg}')
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Its output looks like this (abbreviated):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
a sunny day -Aklms -C7.5
|
|
||||||
a sunny day -Aklms -C9
|
|
||||||
a sunny day -Aklms -C11
|
|
||||||
a sunny day -Ak_euler_a -C7.5
|
|
||||||
a sunny day -Ak_euler_a -C9
|
|
||||||
...
|
|
||||||
a overcast day -Ak_heun -C9
|
|
||||||
a overcast day -Ak_heun -C11
|
|
||||||
```
|
|
||||||
|
|
||||||
To feed it to invoke.py, pass the filename of "-"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python matrix.py | python scripts/invoke.py --from_file -
|
|
||||||
```
|
|
||||||
|
|
||||||
When the script is finished, each of the 27 combinations
|
|
||||||
of adjective, sampler and CFG will be executed.
|
|
||||||
|
|
||||||
The command-line interface provides `!fetch` and `!replay` commands
|
|
||||||
which allow you to read the prompts from a single previously-generated
|
|
||||||
image or a whole directory of them, write the prompts to a file, and
|
|
||||||
then replay them. Or you can create your own file of prompts and feed
|
|
||||||
them to the command-line client from within an interactive session.
|
|
||||||
See [Command-Line Interface](CLI.md) for details.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## **Negative and Unconditioned Prompts**
|
## **Negative and Unconditioned Prompts**
|
||||||
@@ -90,9 +51,7 @@ original prompt:
|
|||||||
`#!bash "A fantastical translucent pony made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
`#!bash "A fantastical translucent pony made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
That image has a woman, so if we want the horse without a rider, we can
|
That image has a woman, so if we want the horse without a rider, we can
|
||||||
@@ -102,9 +61,7 @@ this:
|
|||||||
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
That's nice - but say we also don't want the image to be quite so blue. We can
|
That's nice - but say we also don't want the image to be quite so blue. We can
|
||||||
@@ -113,9 +70,7 @@ add "blue" to the list of negative prompts, so it's now [woman blue]:
|
|||||||
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
Getting close - but there's no sense in having a saddle when our horse doesn't
|
Getting close - but there's no sense in having a saddle when our horse doesn't
|
||||||
@@ -124,9 +79,7 @@ have a rider, so we'll add one more negative prompt: [woman blue saddle].
|
|||||||
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
!!! notes "Notes about this feature:"
|
!!! notes "Notes about this feature:"
|
||||||
@@ -171,12 +124,8 @@ this prompt of `a man picking apricots from a tree`, let's see what happens if
|
|||||||
we increase and decrease how much attention we want Stable Diffusion to pay to
|
we increase and decrease how much attention we want Stable Diffusion to pay to
|
||||||
the word `apricots`:
|
the word `apricots`:
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Using `-` to reduce apricot-ness:
|
Using `-` to reduce apricot-ness:
|
||||||
|
|
||||||
| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |
|
| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |
|
||||||
@@ -192,12 +141,8 @@ Using `+` to increase apricot-ness:
|
|||||||
You can also change the balance between different parts of a prompt. For
|
You can also change the balance between different parts of a prompt. For
|
||||||
example, below is a `mountain man`:
|
example, below is a `mountain man`:
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
And here he is with more mountain:
|
And here he is with more mountain:
|
||||||
|
|
||||||
| `mountain+ man` | `mountain++ man` | `mountain+++ man` |
|
| `mountain+ man` | `mountain++ man` | `mountain+++ man` |
|
||||||
@@ -240,27 +185,27 @@ use the `prompt2prompt` syntax to substitute words in the original prompt for
|
|||||||
words in a new prompt. This works for `img2img` as well.
|
words in a new prompt. This works for `img2img` as well.
|
||||||
|
|
||||||
- `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
|
- `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
|
||||||
- quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
|
- quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
|
||||||
- for single word substitutions parentheses are also optional:
|
- for single word substitutions parentheses are also optional:
|
||||||
`a cat.swap(dog) eating a hotdog`.
|
`a cat.swap(dog) eating a hotdog`.
|
||||||
- Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely
|
- Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely
|
||||||
corresponding to bloc97's `prompt_edit_spatial_start/_end` and
|
corresponding to bloc97's `prompt_edit_spatial_start/_end` and
|
||||||
`prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
|
`prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
|
||||||
intuitively understand.
|
intuitively understand.
|
||||||
- Example usage:`a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end`
|
- Example usage:`a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end`
|
||||||
argument means that the "spatial" (self-attention) edit will stop having any
|
argument means that the "spatial" (self-attention) edit will stop having any
|
||||||
effect after 30% (=0.3) of the steps have been done, leaving Stable
|
effect after 30% (=0.3) of the steps have been done, leaving Stable
|
||||||
Diffusion with 70% of the steps where it is free to decide for itself how to
|
Diffusion with 70% of the steps where it is free to decide for itself how to
|
||||||
reshape the cat-form into a dog form.
|
reshape the cat-form into a dog form.
|
||||||
- The numbers represent a percentage through the step sequence where the edits
|
- The numbers represent a percentage through the step sequence where the edits
|
||||||
should happen. 0 means the start (noisy starting image), 1 is the end (final
|
should happen. 0 means the start (noisy starting image), 1 is the end (final
|
||||||
image).
|
image).
|
||||||
- For img2img, the step sequence does not start at 0 but instead at
|
- For img2img, the step sequence does not start at 0 but instead at
|
||||||
(1-strength) - so if strength is 0.7, s_start and s_end must both be
|
(1-strength) - so if strength is 0.7, s_start and s_end must both be
|
||||||
greater than 0.3 (1-0.7) to have any effect.
|
greater than 0.3 (1-0.7) to have any effect.
|
||||||
- Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable
|
- Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable
|
||||||
Diffusion should have to change the shape of the subject being swapped.
|
Diffusion should have to change the shape of the subject being swapped.
|
||||||
- `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
|
- `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
|
||||||
|
|
||||||
The `prompt2prompt` code is based off
|
The `prompt2prompt` code is based off
|
||||||
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).
|
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).
|
||||||
@@ -314,18 +259,14 @@ usual, unless you fix the seed, the prompts will give you different results each
|
|||||||
time you run them.
|
time you run them.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere, red cube, hybrid"
|
### "blue sphere, red cube, hybrid"
|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
This example doesn't use melding at all and represents the default way of mixing
|
This example doesn't use melding at all and represents the default way of mixing
|
||||||
concepts.
|
concepts.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
It's interesting to see how the AI expressed the concept of "cube" as the four
|
It's interesting to see how the AI expressed the concept of "cube" as the four
|
||||||
@@ -333,7 +274,6 @@ quadrants of the enclosing frame. If you look closely, there is depth there, so
|
|||||||
the enclosing frame is actually a cube.
|
the enclosing frame is actually a cube.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.25 red cube:0.75 hybrid"
|
### "blue sphere:0.25 red cube:0.75 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@@ -346,7 +286,6 @@ the AI's "latent space" of semantic representations. Where is Ludwig
|
|||||||
Wittgenstein when you need him?
|
Wittgenstein when you need him?
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.75 red cube:0.25 hybrid"
|
### "blue sphere:0.75 red cube:0.25 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@@ -357,7 +296,6 @@ Definitely more blue-spherey. The cube is gone entirely, but it's really cool
|
|||||||
abstract art.
|
abstract art.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.5 red cube:0.5 hybrid"
|
### "blue sphere:0.5 red cube:0.5 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@@ -368,7 +306,6 @@ Whoa...! I see blue and red, but no spheres or cubes. Is the word "hybrid"
|
|||||||
summoning up the concept of some sort of scifi creature? Let's find out.
|
summoning up the concept of some sort of scifi creature? Let's find out.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.5 red cube:0.5"
|
### "blue sphere:0.5 red cube:0.5"
|
||||||
|
|
||||||

|

|
||||||
|
|||||||
@@ -1,284 +0,0 @@
|
|||||||
---
|
|
||||||
title: Unified Canvas
|
|
||||||
---
|
|
||||||
|
|
||||||
The Unified Canvas is a tool designed to streamline and simplify the process of
|
|
||||||
composing an image using Stable Diffusion. It offers artists all of the
|
|
||||||
available Stable Diffusion generation modes (Text To Image, Image To Image,
|
|
||||||
Inpainting, and Outpainting) as a single unified workflow. The flexibility of
|
|
||||||
the tool allows you to tweak and edit image generations, extend images beyond
|
|
||||||
their initial size, and to create new content in a freeform way both inside and
|
|
||||||
outside of existing images.
|
|
||||||
|
|
||||||
This document explains the basics of using the Unified Canvas, introducing you
|
|
||||||
to its features and tools one by one. It also describes some of the more
|
|
||||||
advanced tools available to power users of the Canvas.
|
|
||||||
|
|
||||||
## Basics
|
|
||||||
|
|
||||||
The Unified Canvas consists of two layers: the **Base Layer** and the **Mask
|
|
||||||
Layer**. You can swap from one layer to the other by selecting the layer you
|
|
||||||
want in the drop-down menu on the top left corner of the Unified Canvas, or by
|
|
||||||
pressing the (Q) hotkey.
|
|
||||||
|
|
||||||
### Base Layer
|
|
||||||
|
|
||||||
The **Base Layer** is the image content currently managed by the Canvas, and can
|
|
||||||
be exported at any time to the gallery by using the **Save to Gallery** option.
|
|
||||||
When the Base Layer is selected, the Brush (B) and Eraser (E) tools will
|
|
||||||
directly manipulate the base layer. Any images uploaded to the Canvas, or sent
|
|
||||||
to the Unified Canvas from the gallery, will clear out all existing content and
|
|
||||||
set the Base layer to the new image.
|
|
||||||
|
|
||||||
### Staging Area
|
|
||||||
|
|
||||||
When you generate images, they will display in the Canvas's **Staging Area**,
|
|
||||||
alongside the Staging Area toolbar buttons. While the Staging Area is active,
|
|
||||||
you cannot interact with the Canvas itself.
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Accepting generations will commit the new generation to the **Base Layer**. You
|
|
||||||
can review all generated images using the Prev/Next arrows, save any individual
|
|
||||||
generations to your gallery (without committing to the Base layer) or discard
|
|
||||||
generations. While you can Undo a discard in an individual Canvas session, any
|
|
||||||
generations that are not saved will be lost when the Canvas resets.
|
|
||||||
|
|
||||||
### Mask Layer
|
|
||||||
|
|
||||||
The **Mask Layer** consists of any masked sections that have been created to
|
|
||||||
inform Inpainting generations. You can paint a new mask, or edit an existing
|
|
||||||
mask, using the Brush tool and the Eraser with the Mask layer set as your Active
|
|
||||||
layer. Any masked areas will only affect generation inside of the current
|
|
||||||
bounding box.
|
|
||||||
|
|
||||||
### Bounding Box
|
|
||||||
|
|
||||||
When generating a new image, Invoke will process and apply new images within the
|
|
||||||
area denoted by the **Bounding Box**. The Width & Height settings of the
|
|
||||||
Bounding Box, as well as its location within the Unified Canvas and pixels or
|
|
||||||
empty space that it encloses, determine how new invocations are generated - see
|
|
||||||
[Inpainting & Outpainting](#inpainting-and-outpainting) below. The Bounding Box
|
|
||||||
can be moved and resized using the Move (V) tool. It can also be resized using
|
|
||||||
the Bounding Box options in the Options Panel. By using these controls you can
|
|
||||||
generate larger or smaller images, control which sections of the image are being
|
|
||||||
processed, as well as control Bounding Box tools like the Bounding Box
|
|
||||||
fill/erase.
|
|
||||||
|
|
||||||
### <a name="inpainting-and-outpainting"></a> Inpainting & Outpainting
|
|
||||||
|
|
||||||
"Inpainting" means asking the AI to refine part of an image while leaving the
|
|
||||||
rest alone. For example, updating a portrait of your grandmother to have her
|
|
||||||
wear a biker's jacket.
|
|
||||||
|
|
||||||
| masked original | inpaint result |
|
|
||||||
| :-------------------------------------------------------------: | :----------------------------------------------------------------------------------------: |
|
|
||||||
|  |  |
|
|
||||||
|
|
||||||
"Outpainting" means asking the AI to expand the original image beyond its
|
|
||||||
original borders, making a bigger image that's still based on the original. For
|
|
||||||
example, extending the above image of your Grandmother in a biker's jacket to
|
|
||||||
include her wearing jeans (and while we're at it, a motorcycle!)
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
When you are using the Unified Canvas, Invoke decides automatically whether to
|
|
||||||
do Inpainting, Outpainting, ImageToImage, or TextToImage by looking inside the
|
|
||||||
area enclosed by the Bounding Box. It chooses the appropriate type of generation
|
|
||||||
based on whether the Bounding Box contains empty (transparent) areas on the Base
|
|
||||||
layer, or whether it contains colored areas from previous generations (or from
|
|
||||||
painted brushstrokes) on the Base layer, and/or whether the Mask layer contains
|
|
||||||
any brushstrokes. See [Generation Methods](#generation-methods) below for more
|
|
||||||
information.
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
|
|
||||||
To get started with the Unified Canvas, you will want to generate a new base
|
|
||||||
layer using Txt2Img or importing an initial image. We'll refer to either of
|
|
||||||
these methods as the "initial image" in the below guide.
|
|
||||||
|
|
||||||
From there, you can consider the following techniques to augment your image:
|
|
||||||
|
|
||||||
- **New Images**: Move the bounding box to an empty area of the Canvas, type in
|
|
||||||
your prompt, and Invoke, to generate a new image using the Text to Image
|
|
||||||
function.
|
|
||||||
- **Image Correction**: Use the color picker and brush tool to paint corrections
|
|
||||||
on the image, switch to the Mask layer, and brush a mask over your painted
|
|
||||||
area to use **Inpainting**. You can also use the **ImageToImage** generation
|
|
||||||
method to invoke new interpretations of the image.
|
|
||||||
- **Image Expansion**: Move the bounding box to include a portion of your
|
|
||||||
initial image, and a portion of transparent/empty pixels, then Invoke using a
|
|
||||||
prompt that describes what you'd like to see in that area. This will Outpaint
|
|
||||||
the image. You'll typically find more coherent results if you keep about
|
|
||||||
50-60% of the original image in the bounding box. Make sure that the Image To
|
|
||||||
Image Strength slider is set to a high value - you may need to set it higher
|
|
||||||
than you are used to.
|
|
||||||
- **New Content on Existing Images**: If you want to add new details or objects
|
|
||||||
into your image, use the brush tool to paint a sketch of what you'd like to
|
|
||||||
see on the image, switch to the Mask layer, and brush a mask over your painted
|
|
||||||
area to use **Inpainting**. If the masked area is small, consider using a
|
|
||||||
smaller bounding box to take advantage of Invoke's automatic Scaling features,
|
|
||||||
which can help to produce better details.
|
|
||||||
- **And more**: There are a number of creative ways to use the Canvas, and the
|
|
||||||
above are just starting points. We're excited to see what you come up with!
|
|
||||||
|
|
||||||
## <a name="generation-methods"></a> Generation Methods
|
|
||||||
|
|
||||||
The Canvas can use all generation methods available (Txt2Img, Img2Img,
|
|
||||||
Inpainting, and Outpainting), and these will be automatically selected and used
|
|
||||||
based on the current selection area within the Bounding Box.
|
|
||||||
|
|
||||||
### Text to Image
|
|
||||||
|
|
||||||
If the Bounding Box is placed over an area of Canvas with an **empty Base
|
|
||||||
Layer**, invoking a new image will use **TextToImage**. This generates an
|
|
||||||
entirely new image based on your prompt.
|
|
||||||
|
|
||||||
### Image to Image
|
|
||||||
|
|
||||||
If the Bounding Box is placed over an area of Canvas with an **existing Base
|
|
||||||
Layer area with no transparent pixels or masks**, invoking a new image will use
|
|
||||||
**ImageToImage**. This uses the image within the bounding box and your prompt to
|
|
||||||
interpret a new image. The image will be closer to your original image at lower
|
|
||||||
Image to Image strengths.
|
|
||||||
|
|
||||||
### Inpainting
|
|
||||||
|
|
||||||
If the Bounding Box is placed over an area of Canvas with an **existing Base
|
|
||||||
Layer and any pixels selected using the Mask layer**, invoking a new image will
|
|
||||||
use **Inpainting**. Inpainting uses the existing colors/forms in the masked area
|
|
||||||
in order to generate a new image for the masked area only. The unmasked portion
|
|
||||||
of the image will remain the same. Image to Image strength applies to the
|
|
||||||
inpainted area.
|
|
||||||
|
|
||||||
If you desire something completely different from the original image in your new
|
|
||||||
generation (i.e., if you want Invoke to ignore existing colors/forms), consider
|
|
||||||
toggling the Inpaint Replace setting on, and use high values for both Inpaint
|
|
||||||
Replace and Image To Image Strength.
|
|
||||||
|
|
||||||
!!! note
|
|
||||||
|
|
||||||
By default, the **Scale Before Processing** option — which
|
|
||||||
inpaints more coherent details by generating at a larger resolution and then
|
|
||||||
scaling — is only activated when the Bounding Box is relatively small.
|
|
||||||
To get the best inpainting results you should therefore resize your Bounding
|
|
||||||
Box to the smallest area that contains your mask and enough surrounding detail
|
|
||||||
to help Stable Diffusion understand the context of what you want it to draw.
|
|
||||||
You should also update your prompt so that it describes _just_ the area within
|
|
||||||
the Bounding Box.
|
|
||||||
|
|
||||||
### Outpainting
|
|
||||||
|
|
||||||
If the Bounding Box is placed over an area of Canvas partially filled by an
|
|
||||||
existing Base Layer area and partially by transparent pixels or masks, invoking
|
|
||||||
a new image will use **Outpainting**, as well as **Inpainting** any masked
|
|
||||||
areas.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Advanced Features
|
|
||||||
|
|
||||||
Features with non-obvious behavior are detailed below, in order to provide
|
|
||||||
clarity on the intent and common use cases we expect for utilizing them.
|
|
||||||
|
|
||||||
### Toolbar
|
|
||||||
|
|
||||||
#### Mask Options
|
|
||||||
|
|
||||||
- **Enable Mask** - This flag can be used to Enable or Disable the currently
|
|
||||||
painted mask. If you have painted a mask, but you don't want it to affect the
|
painted mask. If you have painted a mask, but you don't want it to affect the
||||||
next invocation, but you _also_ don't want to delete it, then you can set this
|
|
||||||
option to Disable. When you want the mask back, set this back to Enable.
|
|
||||||
- **Preserve Masked Area** - When enabled, Preserve Masked Area inverts the
|
|
||||||
effect of the Mask on the Inpainting process. Pixels in masked areas will be
|
|
||||||
kept unchanged, and unmasked areas will be regenerated.
|
|
||||||
|
|
||||||
#### Creative Tools
|
|
||||||
|
|
||||||
- **Brush - Base/Mask Modes** - The Brush tool switches automatically between
|
|
||||||
different modes of operation for the Base and Mask layers respectively.
|
|
||||||
- On the Base layer, the brush will directly paint on the Canvas using the
|
|
||||||
color selected on the Brush Options menu.
|
|
||||||
- On the Mask layer, the brush will create a new mask. If you're finding the
|
|
||||||
mask difficult to see over the existing content of the Unified Canvas, you
|
|
||||||
can change the color it is drawn with using the color selector on the Mask
|
|
||||||
Options dropdown.
|
|
||||||
- **Erase Bounding Box** - On the Base layer, erases all pixels within the
|
|
||||||
Bounding Box.
|
|
||||||
- **Fill Bounding Box** - On the Base layer, fills all pixels within the
|
|
||||||
Bounding Box with the currently selected color.
|
|
||||||
|
|
||||||
#### Canvas Tools
|
|
||||||
|
|
||||||
- **Move Tool** - Allows for manipulation of the Canvas view (by dragging on the
|
|
||||||
Canvas, outside the bounding box), the Bounding Box (by dragging the edges of
|
|
||||||
the box), or the Width/Height of the Bounding Box (by dragging one of the 9
|
|
||||||
directional handles).
|
|
||||||
- **Reset View** - Click to re-orient the view to the center of the Bounding
|
- **Reset View** - Click to re-orient the view to the center of the Bounding
||||||
Box.
|
|
||||||
- **Merge Visible** - If your browser is having performance problems drawing the
|
|
||||||
image in the Unified Canvas, click this to consolidate all of the information
|
|
||||||
currently being rendered by your browser into a merged copy of the image. This
|
|
||||||
lowers the resource requirements and should improve performance.
|
|
||||||
|
|
||||||
### Seam Correction
|
|
||||||
|
|
||||||
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
|
|
||||||
by Stable Diffusion into your existing image. To do this, the area around the
|
|
||||||
`seam` at the boundary between your image and the new generation is
|
|
||||||
automatically blended to produce a seamless output. In a fully automatic
|
|
||||||
process, a mask is generated to cover the seam, and then the area of the seam is
|
|
||||||
Inpainted.
|
|
||||||
|
|
||||||
Although the default options should work well most of the time, sometimes it can
|
|
||||||
help to alter the parameters that control the seam Inpainting. A wider seam and
|
|
||||||
a blur setting of about 1/3 of the seam have been noted as producing
|
|
||||||
consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
|
|
||||||
both sides). Seam strength of 0.7 is best for reducing hard seams.
|
|
||||||
|
|
||||||
- **Seam Size** - The size of the seam masked area. Set higher to make a larger
|
|
||||||
mask around the seam.
|
|
||||||
- **Seam Blur** - The size of the blur that is applied on _each_ side of the
|
|
||||||
masked area.
|
|
||||||
- **Seam Strength** - The Image To Image Strength parameter used for the
|
|
||||||
Inpainting generation that is applied to the seam area.
|
|
||||||
- **Seam Steps** - The number of generation steps that should be used to Inpaint
|
|
||||||
the seam.
|
|
||||||
|
|
||||||
### Infill & Scaling
|
|
||||||
|
|
||||||
- **Scale Before Processing & W/H**: When generating images with a bounding box
|
|
||||||
smaller than the optimized W/H of the model (e.g., 512x512 for SD1.5), this
|
|
||||||
feature first generates at a larger size with the same aspect ratio, and then
|
|
||||||
scales that image down to fill the selected area. This is particularly useful
|
|
||||||
when inpainting very small details. Scaling is optional but is enabled by
|
|
||||||
default.
|
|
||||||
- **Inpaint Replace**: When Inpainting, the default method is to utilize the
|
|
||||||
existing RGB values of the Base layer to inform the generation process. If
|
|
||||||
Inpaint Replace is enabled, noise is generated and blended with the existing
|
|
||||||
pixels (completely replacing the original RGB values at an Inpaint Replace
|
|
||||||
value of 1). This can help generate more variation from the pixels on the Base
|
|
||||||
layers.
|
|
||||||
- When using Inpaint Replace you should use a higher Image To Image Strength
|
|
||||||
value, especially at higher Inpaint Replace values
|
|
||||||
- **Infill Method**: Invoke currently supports two methods for producing RGB
|
|
||||||
values for use in the Outpainting process: Patchmatch and Tile. We believe
|
|
||||||
that Patchmatch is the superior method, however we provide support for Tile in
|
|
||||||
case Patchmatch cannot be installed or is unavailable on your computer.
|
|
||||||
- **Tile Size**: The Tile method for Outpainting sources small portions of the
|
|
||||||
original image and randomly places these into the areas being Outpainted. This
|
original image and randomly places these into the areas being Outpainted. This
||||||
value sets the size of those tiles.
|
|
||||||
|
|
||||||
## Hot Keys
|
|
||||||
|
|
||||||
The Unified Canvas is a tool that excels when you use hotkeys. You can view the
|
|
||||||
full list of keyboard shortcuts, updated with all new features, by clicking the
|
|
||||||
Keyboard Shortcuts icon at the top right of the InvokeAI WebUI.
|
|
||||||
@@ -303,8 +303,6 @@ The WebGUI is under rapid development. Check back regularly for updates!
|
|||||||
| `--cors [CORS ...]` | Additional allowed origins, comma-separated |
|
| `--cors [CORS ...]` | Additional allowed origins, comma-separated |
|
||||||
| `--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network. |
|
| `--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network. |
|
||||||
| `--port PORT` | Web server: Port to listen on |
|
| `--port PORT` | Web server: Port to listen on |
|
||||||
| `--certfile CERTFILE` | Web server: Path to certificate file to use for SSL. Use together with --keyfile |
|
|
||||||
| `--keyfile KEYFILE` | Web server: Path to private key file to use for SSL. Use together with --certfile' |
|
|
||||||
| `--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver. |
|
| `--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver. |
|
||||||
|
|
||||||
### Web Specific Features
|
### Web Specific Features
|
||||||
|
|||||||
@@ -4,72 +4,59 @@ title: WebUI Hotkey List
|
|||||||
|
|
||||||
# :material-keyboard: **WebUI Hotkey List**
|
# :material-keyboard: **WebUI Hotkey List**
|
||||||
|
|
||||||
## App Hotkeys
|
## General
|
||||||
|
|
||||||
| Setting | Hotkey |
|
| Setting | Hotkey |
|
||||||
| --------------- | ------------------ |
|
| ----------------- | ---------------------- |
|
||||||
| ++ctrl+enter++ | Invoke |
|
| ++a++ | Set All Parameters |
|
||||||
| ++shift+x++ | Cancel |
|
| ++s++ | Set Seed |
|
||||||
| ++alt+a++ | Focus Prompt |
|
| ++u++ | Upscale |
|
||||||
| ++o++ | Toggle Options |
|
| ++r++ | Restoration |
|
||||||
| ++shift+o++ | Pin Options |
|
| ++i++ | Show Metadata |
|
||||||
| ++z++ | Toggle Viewer |
|
| ++d++ ++d++ ++l++ | Delete Image |
|
||||||
| ++g++ | Toggle Gallery |
|
| ++alt+a++ | Focus prompt input |
|
||||||
| ++f++ | Maximize Workspace |
|
| ++shift+i++ | Send To Image to Image |
|
||||||
| ++1++ - ++5++ | Change Tabs |
|
| ++ctrl+enter++ | Start processing |
|
||||||
| ++"`"++ | Toggle Console |
|
| ++shift+x++ | Cancel Processing |
|
||||||
|
| ++shift+d++ | Toggle Dark Mode |
|
||||||
|
| ++"`"++ | Toggle console |
|
||||||
|
|
||||||
## General Hotkeys
|
## Tabs
|
||||||
|
|
||||||
| Setting | Hotkey |
|
| Setting | Hotkey |
|
||||||
| -------------- | ---------------------- |
|
| ------- | ------------------------- |
|
||||||
| ++p++ | Set Prompt |
|
| ++1++ | Go to Text To Image Tab |
|
||||||
| ++s++ | Set Seed |
|
| ++2++ | Go to Image to Image Tab |
|
||||||
| ++a++ | Set Parameters |
|
| ++3++ | Go to Inpainting Tab |
|
||||||
| ++shift+r++ | Restore Faces |
|
| ++4++ | Go to Outpainting Tab |
|
||||||
| ++shift+u++ | Upscale |
|
| ++5++ | Go to Nodes Tab |
|
||||||
| ++i++ | Show Info |
|
| ++6++ | Go to Post Processing Tab |
|
||||||
| ++shift+i++ | Send To Image To Image |
|
|
||||||
| ++del++ | Delete Image |
|
|
||||||
| ++esc++ | Close Panels |
|
|
||||||
|
|
||||||
## Gallery Hotkeys
|
## Gallery
|
||||||
|
|
||||||
| Setting | Hotkey |
|
| Setting | Hotkey |
|
||||||
| ----------------------| --------------------------- |
|
| -------------- | ------------------------------- |
|
||||||
| ++arrow-left++ | Previous Image |
|
| ++g++ | Toggle Gallery |
|
||||||
| ++arrow-right++ | Next Image |
|
| ++left++ | Go to previous image in gallery |
|
||||||
| ++shift+g++ | Toggle Gallery Pin |
|
| ++right++ | Go to next image in gallery |
|
||||||
| ++shift+arrow-up++ | Increase Gallery Image Size |
|
| ++shift+p++ | Pin gallery |
|
||||||
| ++shift+arrow-down++ | Decrease Gallery Image Size |
|
| ++shift+up++ | Increase gallery image size |
|
||||||
|
| ++shift+down++ | Decrease gallery image size |
|
||||||
|
| ++shift+r++ | Reset image gallery size |
|
||||||
|
|
||||||
## Unified Canvas Hotkeys
|
## Inpainting
|
||||||
|
|
||||||
| Setting | Hotkey |
|
| Setting | Hotkey |
|
||||||
| --------------------------------- | ---------------------- |
|
| ---------------------------- | --------------------- |
|
||||||
| ++b++ | Select Brush |
|
| ++"["++ | Decrease brush size |
|
||||||
| ++e++ | Select Eraser |
|
| ++"]"++ | Increase brush size |
|
||||||
| ++bracket-left++ | Decrease Brush Size |
|
| ++alt+"["++ | Decrease mask opacity |
|
||||||
| ++bracket-right++ | Increase Brush Size |
|
| ++alt+"]"++ | Increase mask opacity |
|
||||||
| ++shift+bracket-left++ | Decrease Brush Opacity |
|
| ++b++ | Select brush |
|
||||||
| ++shift+bracket-right++ | Increase Brush Opacity |
|
| ++e++ | Select eraser |
|
||||||
| ++v++ | Move Tool |
|
| ++ctrl+z++ | Undo brush stroke |
|
||||||
| ++shift+f++ | Fill Bounding Box |
|
| ++ctrl+shift+z++, ++ctrl+y++ | Redo brush stroke |
|
||||||
| ++del++ / ++backspace++ | Erase Bounding Box |
|
| ++h++ | Hide mask |
|
||||||
| ++c++ | Select Color Picker |
|
| ++shift+m++ | Invert mask |
|
||||||
| ++n++ | Toggle Snap |
|
| ++shift+c++ | Clear mask |
|
||||||
| ++"Hold Space"++ | Quick Toggle Move |
|
| ++shift+j++ | Expand canvas |
|
||||||
| ++q++ | Toggle Layer |
|
|
||||||
| ++shift+c++ | Clear Mask |
|
|
||||||
| ++h++ | Hide Mask |
|
|
||||||
| ++shift+h++ | Show/Hide Bounding Box |
|
|
||||||
| ++shift+m++ | Merge Visible |
|
|
||||||
| ++shift+s++ | Save To Gallery |
|
|
||||||
| ++ctrl+c++ | Copy To Clipboard |
|
|
||||||
| ++shift+d++ | Download Image |
|
|
||||||
| ++ctrl+z++ | Undo |
|
|
||||||
| ++ctrl+y++ / ++ctrl+shift+z++ | Redo |
|
|
||||||
| ++r++ | Reset View |
|
|
||||||
| ++arrow-left++ | Previous Staging Image |
|
|
||||||
| ++arrow-right++ | Next Staging Image |
|
|
||||||
| ++enter++ | Accept Staging Image |
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
title: Overview
|
|
||||||
---
|
|
||||||
|
|
||||||
Here you can find the documentation for different features.
|
|
||||||
@@ -39,7 +39,7 @@ Looking for a short version? Here's a TL;DR in 3 tables.
|
|||||||
!!! tip "suggestions"
|
!!! tip "suggestions"
|
||||||
|
|
||||||
For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.
|
For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.
|
||||||
|
|
||||||
For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`).
|
For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|||||||
225
docs/index.md
@@ -6,14 +6,15 @@ title: Home
|
|||||||
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as:
|
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install -r docs/requirements-mkdocs.txt
|
pip install -r requirements-mkdocs.txt
|
||||||
mkdocs serve
|
mkdocs serve
|
||||||
```
|
```
|
||||||
-->
|
-->
|
||||||
|
|
||||||
<div align="center" markdown>
|
<div align="center" markdown>
|
||||||
|
|
||||||
[](https://github.com/invoke-ai/InvokeAI)
|
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>
|
||||||
|
|
||||||
|
[](https://github.com/invoke-ai/InvokeAI)
|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
|
|
||||||
@@ -69,11 +70,7 @@ image-to-image generator. It provides a streamlined process with various new
|
|||||||
features and options to aid the image generation process. It runs on Windows,
|
features and options to aid the image generation process. It runs on Windows,
|
||||||
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
||||||
|
|
||||||
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>]
|
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||||
[<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a
|
|
||||||
href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a
|
|
||||||
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas &
|
|
||||||
Q&A</a>]
|
|
||||||
|
|
||||||
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
||||||
|
|
||||||
@@ -83,19 +80,12 @@ Q&A</a>]
|
|||||||
|
|
||||||
## :octicons-package-dependencies-24: Installation
|
## :octicons-package-dependencies-24: Installation
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
This fork is supported across multiple platforms. You can find individual
|
||||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
installation instructions below.
|
||||||
driver).
|
|
||||||
|
|
||||||
First time users, please see
|
- :fontawesome-brands-linux: [Linux](installation/INSTALL_LINUX.md)
|
||||||
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
|
- :fontawesome-brands-windows: [Windows](installation/INSTALL_WINDOWS.md)
|
||||||
getting InvokeAI up and running on your system. For alternative installation and
|
- :fontawesome-brands-apple: [Macintosh](installation/INSTALL_MAC.md)
|
||||||
upgrade instructions, please see:
|
|
||||||
[InvokeAI Installation Overview](installation/)
|
|
||||||
|
|
||||||
Linux users who wish to make use of the PyPatchMatch inpainting functions will
|
|
||||||
need to perform a bit of extra work to enable this module. Instructions can be
|
|
||||||
found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
|
|
||||||
|
|
||||||
## :fontawesome-solid-computer: Hardware Requirements
|
## :fontawesome-solid-computer: Hardware Requirements
|
||||||
|
|
||||||
@@ -104,29 +94,25 @@ found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
|
|||||||
You will need one of the following:
|
You will need one of the following:
|
||||||
|
|
||||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
|
||||||
only)
|
|
||||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||||
|
|
||||||
We do **not recommend** the following video cards due to issues with their
|
|
||||||
running in half-precision mode and having insufficient VRAM to render 512x512
|
|
||||||
images in full-precision mode:
|
|
||||||
|
|
||||||
- NVIDIA 10xx series cards such as the 1080ti
|
|
||||||
- GTX 1650 series cards
|
|
||||||
- GTX 1660 series cards
|
|
||||||
|
|
||||||
### :fontawesome-solid-memory: Memory
|
### :fontawesome-solid-memory: Memory
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
|
|
||||||
### :fontawesome-regular-hard-drive: Disk
|
### :fontawesome-regular-hard-drive: Disk
|
||||||
|
|
||||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
- At least 12 GB of free disk space for the machine learning model, Python, and
|
||||||
all its dependencies.
|
all its dependencies.
|
||||||
|
|
||||||
!!! info
|
!!! info
|
||||||
|
|
||||||
|
If you have an Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
|
||||||
|
full-precision mode as shown below.
|
||||||
|
|
||||||
|
Similarly, specify full-precision mode on Apple M1 hardware.
|
||||||
|
|
||||||
Precision is auto configured based on the device. If however you encounter errors like
|
Precision is auto configured based on the device. If however you encounter errors like
|
||||||
`expected type Float but found Half` or `not implemented for Half` you can try starting
|
`expected type Float but found Half` or `not implemented for Half` you can try starting
|
||||||
`invoke.py` with the `--precision=float32` flag:
|
`invoke.py` with the `--precision=float32` flag:
|
||||||
@@ -135,114 +121,93 @@ images in full-precision mode:
|
|||||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
||||||
```
|
```
|
||||||
|
|
||||||
## :octicons-gift-24: InvokeAI Features
|
|
||||||
|
|
||||||
- [The InvokeAI Web Interface](features/WEB.md) -
|
|
||||||
[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
|
|
||||||
[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
|
||||||
<!-- separator -->
|
|
||||||
- [The Command Line Interface](features/CLI.md) -
|
|
||||||
[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
|
|
||||||
[Outpainting](features/OUTPAINTING.md) -
|
|
||||||
[Adding custom styles and subjects](features/CONCEPTS.md) -
|
|
||||||
[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
|
||||||
<!-- separator -->
|
|
||||||
- [Generating Variations](features/VARIATIONS.md)
|
|
||||||
<!-- separator -->
|
|
||||||
- [Prompt Engineering](features/PROMPTS.md)
|
|
||||||
<!-- separator -->
|
|
||||||
- Miscellaneous
|
|
||||||
- [NSFW Checker](features/NSFW.md)
|
|
||||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
|
||||||
- [Other](features/OTHER.md)
|
|
||||||
|
|
||||||
## :octicons-log-16: Latest Changes
|
## :octicons-log-16: Latest Changes
|
||||||
|
|
||||||
### v2.2.4 <small>(11 December 2022)</small>
|
### v2.1.0 <small>(2 November 2022)</small>
|
||||||
|
|
||||||
#### the `invokeai` directory
|
- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
|
||||||
|
support in the WebGUI
|
||||||
|
- Greatly improved navigation and user experience in the
|
||||||
|
[WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
|
||||||
|
- The prompt syntax has been enhanced with
|
||||||
|
[prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
|
||||||
|
- You can now load
|
||||||
|
[multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
|
||||||
|
without leaving the CLI.
|
||||||
|
- The installation process (via `scripts/preload_models.py`) now lets you select
|
||||||
|
among several popular
|
||||||
|
[Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
|
||||||
|
and downloads and installs them on your behalf. Among other models, this
|
||||||
|
script will install the current Stable Diffusion 1.5 model as well as a
|
||||||
|
 StabilityAI variational autoencoder (VAE) which improves face generation.
|
||||||
|
 - Tired of struggling with photo editors to get the masked region for
|
||||||
|
inpainting just right? Let the AI make the mask for you using
|
||||||
|
[text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p).
|
||||||
|
This feature allows you to specify the part of the image to paint over using
|
||||||
|
just English-language phrases.
|
||||||
|
- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI
|
||||||
|
with the
|
||||||
|
[outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
|
||||||
|
- Tired of seeing your subject's bodies duplicated or mangled when generating
|
||||||
|
larger-dimension images? Check out the `--hires` option in the CLI, or select
|
||||||
|
the corresponding toggle in the WebGUI.
|
||||||
|
- We now support textual inversion and fine-tune .bin styles and subjects from
|
||||||
|
the Hugging Face archive of
|
||||||
|
[SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file
|
||||||
|
using the `--embedding_path` option. (The next version will support merging
|
||||||
|
and loading of multiple simultaneous models).
|
||||||
|
- ...
|
||||||
|
|
||||||
Previously there were two directories to worry about, the directory that
|
### v2.0.1 <small>(13 October 2022)</small>
|
||||||
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
|
|
||||||
directory that contained the models files, embeddings, configuration and
|
|
||||||
outputs. With the 2.2.4 release, this dual system is done away with, and
|
|
||||||
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
|
|
||||||
live in a directory named `invokeai`. By default this directory is located in
|
|
||||||
your home directory (e.g. `\Users\yourname` on Windows), but you can select
|
|
||||||
where it goes at install time.
|
|
||||||
|
|
||||||
After installation, you can delete the install directory (the one that the zip
|
- fix noisy images at high step count when using k\* samplers
|
||||||
file creates when it unpacks). Do **not** delete or move the `invokeai`
|
- dream.py script now calls invoke.py module directly rather than via a new
|
||||||
directory!
|
python process (which could break the environment)
|
||||||
|
|
||||||
##### Initialization file `invokeai/invokeai.init`
|
### v2.0.0 <small>(9 October 2022)</small>
|
||||||
|
|
||||||
You can place frequently-used startup options in this file, such as the default
|
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
|
||||||
number of steps or your preferred sampler. To keep everything in one place, this
|
backward compatibility.
|
||||||
file has now been moved into the `invokeai` directory and is named
|
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
||||||
`invokeai.init`.
|
- Support for
|
||||||
|
<a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a>
|
||||||
#### To update from Version 2.2.3
|
and
|
||||||
|
<a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
|
||||||
The easiest route is to download and unpack one of the 2.2.4 installer files.
|
- img2img runs on all k\* samplers
|
||||||
When it asks you for the location of the `invokeai` runtime directory, respond
|
- Support for
|
||||||
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
|
<a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative
|
||||||
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
|
prompts</a>
|
||||||
and answer "Y" when asked if you want to reuse the directory.
|
- Support for CodeFormer face reconstruction
|
||||||
|
- Support for Textual Inversion on Macintoshes
|
||||||
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
|
- Support in both WebGUI and CLI for
|
||||||
does not know about the new directory layout and won't be fully functional.
|
<a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing
|
||||||
|
of previously-generated images</a> using facial reconstruction, ESRGAN
|
||||||
#### To update to 2.2.5 (and beyond), there's now an update path.
|
upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen"
|
||||||
|
upscaling. See the `!fix` command.
|
||||||
As they become available, you can update to more recent versions of InvokeAI
|
- New `--hires` option on `invoke>` line allows
|
||||||
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
|
<a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger
|
||||||
Running it without any arguments will install the most recent version of
|
images to be created without duplicating elements</a>, at the cost of some
|
||||||
InvokeAI. Alternatively, you can install specific releases by running the `update.sh`
|
performance.
|
||||||
script with an argument in the command shell. This syntax accepts the path to
|
- New `--perlin` and `--threshold` options allow you to add and control
|
||||||
the desired release's zip file, which you can find by clicking on the green
|
variation during image generation (see
|
||||||
"Code" button on this repository's home page.
|
<a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding
|
||||||
|
and Perlin Noise Initialization</a>
|
||||||
#### Other 2.2.4 Improvements
|
- Extensive metadata now written into PNG files, allowing reliable regeneration
|
||||||
|
of images and tweaking of previous settings.
|
||||||
- Fix InvokeAI GUI initialization by @addianto in #1687
|
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
|
||||||
- fix link in documentation by @lstein in #1728
|
platforms.
|
||||||
- Fix broken link by @ShawnZhong in #1736
|
- Improved
|
||||||
- Remove reference to binary installer by @lstein in #1731
|
<a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line
|
||||||
- documentation fixes for 2.2.3 by @lstein in #1740
|
completion behavior</a>. New commands added:
|
||||||
- Modify installer links to point closer to the source installer by @ebr in
|
- List command-line history with `!history`
|
||||||
#1745
|
- Search command-line history with `!search`
|
||||||
- add documentation warning about 1650/60 cards by @lstein in #1753
|
- Clear history with `!clear`
|
||||||
- Fix Linux source URL in installation docs by @andybearman in #1756
|
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||||
- Make install instructions discoverable in readme by @damian0815 in #1752
|
configure. To switch away from auto use the new flag like
|
||||||
- typo fix by @ofirkris in #1755
|
`--precision=float32`.
|
||||||
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
|
|
||||||
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
|
|
||||||
in #1765
|
|
||||||
- stability and usage improvements to binary & source installers by @lstein in
|
|
||||||
#1760
|
|
||||||
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
|
|
||||||
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
|
|
||||||
- invoke script cds to its location before running by @lstein in #1805
|
|
||||||
- Make PaperCut and VoxelArt models load again by @lstein in #1730
|
|
||||||
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
|
|
||||||
#1817
|
|
||||||
- Clean up readme by @hipsterusername in #1820
|
|
||||||
- Optimized Docker build with support for external working directory by @ebr in
|
|
||||||
#1544
|
|
||||||
- disable pushing the cloud container by @mauwii in #1831
|
|
||||||
- Fix docker push github action and expand with additional metadata by @ebr in
|
|
||||||
#1837
|
|
||||||
- Fix Broken Link To Notebook by @VedantMadane in #1821
|
|
||||||
- Account for flat models by @spezialspezial in #1766
|
|
||||||
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
|
|
||||||
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
|
|
||||||
@SammCheese in #1848
|
|
||||||
- Make force free GPU memory work in img2img by @addianto in #1844
|
|
||||||
- New installer by @lstein
|
|
||||||
|
|
||||||
For older changelogs, please visit the
|
For older changelogs, please visit the
|
||||||
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
|
**[CHANGELOG](CHANGELOG/#v114-11-september-2022)**.
|
||||||
|
|
||||||
## :material-target: Troubleshooting
|
## :material-target: Troubleshooting
|
||||||
|
|
||||||
|
|||||||
@@ -1,315 +0,0 @@
|
|||||||
---
|
|
||||||
title: Installing with the Automated Installer
|
|
||||||
---
|
|
||||||
|
|
||||||
# InvokeAI Automated Installation
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
The automated installer is a shell script that attempts to automate every step
|
|
||||||
needed to install and run InvokeAI on a stock computer running recent versions
|
|
||||||
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
|
||||||
version of InvokeAI with the option to upgrade to experimental versions later.
|
|
||||||
|
|
||||||
## Walk through
|
|
||||||
|
|
||||||
1. Make sure that your system meets the
|
|
||||||
[hardware requirements](../index.md#hardware-requirements) and has the
|
|
||||||
appropriate GPU drivers installed. In particular, if you are a Linux user
|
|
||||||
with an AMD GPU installed, you may need to install the
|
|
||||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
|
||||||
|
|
||||||
!!! info "Required Space"
|
|
||||||
|
|
||||||
Installation requires roughly 18G of free disk space to load the libraries and
|
|
||||||
recommended model weights files.
|
|
||||||
|
|
||||||
Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB of free disk space to download and cache python dependencies. NOTE for Linux users: if your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
|
|
||||||
|
|
||||||
2. Check that your system has an up-to-date Python installed. To do this, open
|
|
||||||
up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
|
|
||||||
"Powershell" on Windows) and type `python --version`. If Python is
|
|
||||||
installed, it will print out the version number. If it is version `3.9.1` or
|
|
||||||
higher, you meet requirements.
|
|
||||||
|
|
||||||
!!! warning "If you see an older version, or get a command not found error"
|
|
||||||
|
|
||||||
Go to [Python Downloads](https://www.python.org/downloads/) and
|
|
||||||
download the appropriate installer package for your platform. We recommend
|
|
||||||
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
|
|
||||||
which has been extensively tested with InvokeAI.
|
|
||||||
|
|
||||||
!!! warning "At this time we do not recommend Python 3.11"
|
|
||||||
|
|
||||||
_Please select your platform in the section below for platform-specific
|
|
||||||
setup requirements._
|
|
||||||
|
|
||||||
=== "Windows users"
|
|
||||||
|
|
||||||
- During the Python configuration process,
|
|
||||||
look out for a checkbox to add Python to your PATH
|
|
||||||
and select it. If the install script complains that it can't
|
|
||||||
find python, then open the Python installer again and choose
|
|
||||||
"Modify" existing installation.
|
|
||||||
|
|
||||||
- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
|
|
||||||
|
|
||||||
=== "Mac users"
|
|
||||||
|
|
||||||
- After installing Python, you may need to run the
|
|
||||||
following command from the Terminal in order to install the Web
|
|
||||||
certificates needed to download model data from https sites. If
|
|
||||||
you see lots of CERTIFICATE ERRORS during the last part of the
|
|
||||||
install, this is the problem, and you can fix it with this command:
|
|
||||||
|
|
||||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
|
||||||
|
|
||||||
- You may need to install the Xcode command line tools. These
|
|
||||||
are a set of tools that are needed to run certain applications in a
|
|
||||||
Terminal, including InvokeAI. This package is provided directly by Apple.
|
|
||||||
|
|
||||||
- To install, open a terminal window and run `xcode-select
|
|
||||||
--install`. You will get a macOS system popup guiding you through the
|
|
||||||
install. If you already have them installed, you will instead see some
|
|
||||||
output in the Terminal advising you that the tools are already installed.
|
|
||||||
|
|
||||||
- More information can be found here:
|
|
||||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
|
||||||
|
|
||||||
=== "Linux users"
|
|
||||||
|
|
||||||
For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other Debian-derived distributions.
|
|
||||||
|
|
||||||
On Ubuntu 22.04 and higher, run the following:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install -y python3 python3-pip python3-venv
|
|
||||||
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
|
||||||
```
|
|
||||||
|
|
||||||
On Ubuntu 20.04, the process is slightly different:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install -y software-properties-common
|
|
||||||
sudo add-apt-repository -y ppa:deadsnakes/ppa
|
|
||||||
sudo apt install python3.10 python3-pip python3.10-venv
|
|
||||||
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
|
||||||
```
|
|
||||||
|
|
||||||
Both `python` and `python3` commands are now pointing at Python3.10. You can still access older versions of Python by calling `python2`, `python3.8`, etc.
|
|
||||||
|
|
||||||
Linux systems require a couple of additional graphics libraries to be installed for proper functioning of `python3-opencv`. Please run the following:
|
|
||||||
|
|
||||||
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
|
||||||
|
|
||||||
3. The source installer is distributed in ZIP files. Go to the
|
|
||||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
|
||||||
look for a series of files named:
|
|
||||||
|
|
||||||
- [InvokeAI-installer-2.2.4-p5-mac.zip](https://github.com/invoke-ai/InvokeAI/files/10254728/InvokeAI-installer-2.2.4-p5-mac.zip)
|
|
||||||
- [InvokeAI-installer-2.2.4-p5-windows.zip](https://github.com/invoke-ai/InvokeAI/files/10254729/InvokeAI-installer-2.2.4-p5-windows.zip)
|
|
||||||
- [InvokeAI-installer-2.2.4-p5-linux.zip](https://github.com/invoke-ai/InvokeAI/files/10254727/InvokeAI-installer-2.2.4-p5-linux.zip)
|
|
||||||
|
|
||||||
Download the one that is appropriate for your operating system.
|
|
||||||
|
|
||||||
4. Unpack the zip file into a convenient directory. This will create a new
|
|
||||||
directory named "InvokeAI-Installer". This example shows how this would look
|
|
||||||
using the `unzip` command-line tool, but you may use any graphical or
|
|
||||||
command-line Zip extractor:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
|
|
||||||
Archive: C: \Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
|
|
||||||
creating: InvokeAI-Installer\
|
|
||||||
inflating: InvokeAI-Installer\install.bat
|
|
||||||
inflating: InvokeAI-Installer\readme.txt
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
After successful installation, you can delete the `InvokeAI-Installer`
|
|
||||||
directory.
|
|
||||||
|
|
||||||
5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
|
|
||||||
accept the dialog box that asks you if you wish to modify your registry.
|
|
||||||
This activates long filename support on your system and will prevent
|
|
||||||
mysterious errors during installation.
|
|
||||||
|
|
||||||
6. If you are using a desktop GUI, double-click the installer file. It will be
|
|
||||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|
|
||||||
Macintosh systems.
|
|
||||||
|
|
||||||
On Windows systems you will probably get an "Untrusted Publisher" warning.
|
|
||||||
Click on "More Info" and select "Run Anyway." You trust us, right?
|
|
||||||
|
|
||||||
7. Alternatively, from the command line, run the shell script or .bat file:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> cd InvokeAI-Installer
|
|
||||||
C:\Documents\Linco\invokeAI> install.bat
|
|
||||||
```
|
|
||||||
|
|
||||||
8. The script will ask you to choose where to install InvokeAI. Select a
|
|
||||||
directory with at least 18G of free space for a full install. InvokeAI and
|
|
||||||
all its support files will be installed into a new directory named
|
|
||||||
`invokeai` located at the location you specify.
|
|
||||||
|
|
||||||
- The default is to install the `invokeai` directory in your home directory,
|
|
||||||
usually `C:\Users\YourName\invokeai` on Windows systems,
|
|
||||||
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
|
|
||||||
on Macintoshes, where "YourName" is your login name.
|
|
||||||
|
|
||||||
- The script uses tab autocompletion to suggest directory path completions.
|
|
||||||
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
|
|
||||||
to suggest completions.
|
|
||||||
|
|
||||||
9. Sit back and let the install script work. It will install the third-party
|
|
||||||
libraries needed by InvokeAI, then download the current InvokeAI release and
|
|
||||||
install it.
|
|
||||||
|
|
||||||
Be aware that some of the library download and install steps take a long
|
|
||||||
time. In particular, the `pytorch` package is quite large and often appears
|
|
||||||
to get "stuck" at 99.9%. Have patience and the installation step will
|
|
||||||
eventually resume. However, there are occasions when the library install
|
|
||||||
does legitimately get stuck. If you have been waiting for more than ten
|
|
||||||
minutes and nothing is happening, you can interrupt the script with ^C. You
|
|
||||||
may restart it and it will pick up where it left off.
|
|
||||||
|
|
||||||
10. After installation completes, the installer will launch a script called
|
|
||||||
`configure_invokeai.py`, which will guide you through the first-time process
|
|
||||||
of selecting one or more Stable Diffusion model weights files, downloading
|
|
||||||
and configuring them. We provide a list of popular models that InvokeAI
|
|
||||||
performs well with. However, you can add more weight files later on using
|
|
||||||
the command-line client or the Web UI. See
|
|
||||||
[Installing Models](050_INSTALLING_MODELS.md) for details.
|
|
||||||
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
|
||||||
agreement that you must agree to in order to use. The script will list the
|
|
||||||
steps you need to take to create an account on the official site that hosts
|
|
||||||
the weights files, accept the agreement, and provide an access token that
|
|
||||||
allows InvokeAI to legally download and install the weights files.
|
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
11. The script will now exit and you'll be ready to generate some images. Look
|
|
||||||
for the directory `invokeai` installed in the location you chose at the
|
|
||||||
beginning of the install session. Look for a shell script named `invoke.sh`
|
|
||||||
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
|
||||||
it or typing its name at the command-line:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> cd invokeai
|
|
||||||
C:\Documents\Linco\invokeAI> invoke.bat
|
|
||||||
```
|
|
||||||
|
|
||||||
- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
|
|
||||||
(1) the command-line interface, or (2) the web GUI. If you start the
|
|
||||||
latter, you can load the user interface by pointing your browser at
|
|
||||||
http://localhost:9090.
|
|
||||||
|
|
||||||
- The script also offers you a third option labeled "open the developer
|
|
||||||
console". If you choose this option, you will be dropped into a
|
|
||||||
command-line interface in which you can run python commands directly,
|
|
||||||
access developer tools, and launch InvokeAI with customized options.
|
|
||||||
|
|
||||||
12. You can launch InvokeAI with several different command-line arguments that
|
|
||||||
customize its behavior. For example, you can change the location of the
|
|
||||||
image output directory, or select your favorite sampler. See the
|
|
||||||
[Command-Line Interface](../features/CLI.md) for a full list of the options.
|
|
||||||
|
|
||||||
- To set defaults that will take effect every time you launch InvokeAI,
|
|
||||||
      use a text editor (e.g. Notepad) to edit the file
|
|
||||||
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
|
||||||
follow to add and modify launch options.
|
|
||||||
|
|
||||||
!!! warning "The `invokeai` directory contains the `invoke` application, its
|
|
||||||
configuration files, the model weight files, and outputs of image generation.
|
|
||||||
Once InvokeAI is installed, do not move or remove this directory."
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### _Package dependency conflicts_
|
|
||||||
|
|
||||||
If you have previously installed InvokeAI or another Stable Diffusion package,
|
|
||||||
the installer may occasionally pick up outdated libraries and either the
|
|
||||||
installer or `invoke` will fail with complaints about library conflicts. You can
|
|
||||||
address this by entering the `invokeai` directory and running `update.sh`, which
|
|
||||||
will bring InvokeAI up to date with the latest libraries.
|
|
||||||
|
|
||||||
### ldm from pypi
|
|
||||||
|
|
||||||
!!! warning
|
|
||||||
|
|
||||||
Some users have tried to correct dependency problems by installing
|
|
||||||
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
|
||||||
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
|
||||||
ldm will make matters worse. If you've installed ldm, uninstall it with
|
|
||||||
`pip uninstall ldm`.
|
|
||||||
|
|
||||||
### Corrupted configuration file
|
|
||||||
|
|
||||||
Everything seems to install ok, but `invoke` complains of a corrupted
|
|
||||||
configuration file and goes back into the configuration process (asking you to
|
|
||||||
download models, etc), but this doesn't fix the problem.
|
|
||||||
|
|
||||||
This issue is often caused by a misconfigured configuration directive in the
|
|
||||||
`invokeai\invokeai.init` initialization file that contains startup settings. The
|
|
||||||
easiest way to fix the problem is to move the file out of the way and re-run
|
|
||||||
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
|
|
||||||
script) and run this command:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
configure_invokeai.py --root=.
|
|
||||||
```
|
|
||||||
|
|
||||||
Note the dot (.) after `--root`. It is part of the command.
|
|
||||||
|
|
||||||
_If none of these maneuvers fixes the problem_ then please report the problem to
|
|
||||||
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
|
||||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
|
||||||
assistance.
|
|
||||||
|
|
||||||
### other problems
|
|
||||||
|
|
||||||
If you run into problems during or after installation, the InvokeAI team is
|
|
||||||
available to help you. Either create an
|
|
||||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
|
||||||
make a request for help on the "bugs-and-support" channel of our
|
|
||||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
|
||||||
organization, but typically somebody will be available to help you within 24
|
|
||||||
hours, and often much sooner.
|
|
||||||
|
|
||||||
## Updating to newer versions
|
|
||||||
|
|
||||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
|
||||||
To update to the latest released version (recommended), run the `update.sh`
|
|
||||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
|
||||||
release and re-run the `configure_invokeai` script to download any updated
|
|
||||||
models files that may be needed. You can also use this to add additional models
|
|
||||||
that you did not select at installation time.
|
|
||||||
|
|
||||||
You can now close the developer console and run `invoke` as before. If you get
|
|
||||||
complaints about missing models, then you may need to do the additional step of
|
|
||||||
running `configure_invokeai.py`. This happens relatively infrequently. To do
|
|
||||||
this, simply open up the developer's console again and type
|
|
||||||
`python scripts/configure_invokeai.py`.
|
|
||||||
|
|
||||||
You may also use the `update` script to install any selected version of
|
|
||||||
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
|
|
||||||
link of the version you wish to install. You can find the zip links by going to
|
|
||||||
the one of the release pages and looking for the **Assets** section at the
|
|
||||||
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
|
|
||||||
big code directory on the InvokeAI welcome page. When you find the version you
|
|
||||||
want to install, go to the green "<> Code" button at the top, and copy the
|
|
||||||
"Download ZIP" link.
|
|
||||||
|
|
||||||
Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
|
|
||||||
version as its argument. For example, this will install the old 2.2.0 release.
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
|
|
||||||
```
|
|
||||||
@@ -1,589 +0,0 @@
|
|||||||
---
|
|
||||||
title: Installing Manually
|
|
||||||
---
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
!!! warning "This is for advanced Users"
|
|
||||||
|
|
||||||
who are already experienced with using conda or pip
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
You have two choices for manual installation, the [first
|
|
||||||
one](#PIP_method) uses basic Python virtual environment (`venv`)
|
|
||||||
commands and the PIP package manager. The [second one](#Conda_method)
|
|
||||||
is based on the Anaconda3 package manager (`conda`). Both methods require
|
|
||||||
you to enter commands on the terminal, also known as the "console".
|
|
||||||
|
|
||||||
Note that the conda install method is currently deprecated and will not
|
|
||||||
be supported at some point in the future.
|
|
||||||
|
|
||||||
On Windows systems you are encouraged to install and use the
|
|
||||||
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
|
||||||
which provides compatibility with Linux and Mac shells and nice
|
|
||||||
features such as command-line completion.
|
|
||||||
|
|
||||||
## pip Install
|
|
||||||
|
|
||||||
To install InvokeAI with virtual environments and the PIP package
|
|
||||||
manager, please follow these steps:
|
|
||||||
|
|
||||||
1. Make sure you are using Python 3.9 or 3.10. The rest of the install
|
|
||||||
procedure depends on this:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python -V
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
|
||||||
GitHub:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create an InvokeAI folder where you will follow the rest of the
|
|
||||||
steps.
|
|
||||||
|
|
||||||
3. From within the InvokeAI top-level directory, create and activate a virtual
|
|
||||||
environment named `invokeai`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python -mvenv invokeai
|
|
||||||
source invokeai/bin/activate
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Make sure that pip is installed in your virtual environment and up to date:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python -mensurepip --upgrade
|
|
||||||
python -mpip install --upgrade pip
|
|
||||||
```
|
|
||||||
|
|
||||||
5. Pick the correct `requirements*.txt` file for your hardware and operating
|
|
||||||
system.
|
|
||||||
|
|
||||||
We have created a series of environment files suited for different operating
|
|
||||||
systems and GPU hardware. They are located in the
|
|
||||||
`environments-and-requirements` directory:
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||
| filename | OS |
|
|
||||||
| :---------------------------------: | :-------------------------------------------------------------: |
|
|
||||||
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
|
|
||||||
| requirements-lin-arm64.txt | Linux running on arm64 systems |
|
|
||||||
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
|
|
||||||
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
|
|
||||||
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |
|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Select the appropriate requirements file, and make a link to it from
|
|
||||||
`requirements.txt` in the top-level InvokeAI directory. The command to do
|
|
||||||
this from the top-level directory is:
|
|
||||||
|
|
||||||
!!! example ""
|
|
||||||
|
|
||||||
=== "Macintosh and Linux"
|
|
||||||
|
|
||||||
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Windows"
|
|
||||||
|
|
||||||
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! warning
|
|
||||||
|
|
||||||
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
|
|
||||||
This is a base requirements file that does not have the platform-specific
|
|
||||||
libraries. Also, be sure to link or copy the platform-specific file to
|
|
||||||
a top-level file named `requirements.txt` as shown here. Running pip on
|
|
||||||
a requirements file in a subdirectory will not work as expected.
|
|
||||||
|
|
||||||
When this is done, confirm that a file named `requirements.txt` has been
|
|
||||||
created in the InvokeAI root directory and that it points to the correct
|
|
||||||
file in `environments-and-requirements`.
|
|
||||||
|
|
||||||
6. Run PIP
|
|
||||||
|
|
||||||
Be sure that the `invokeai` environment is active before doing this:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install --prefer-binary -r requirements.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
7. Set up the runtime directory
|
|
||||||
|
|
||||||
In this step you will initialize a runtime directory that will
|
|
||||||
contain the models, model config files, directory for textual
|
|
||||||
inversion embeddings, and your outputs. This keeps the runtime
|
|
||||||
directory separate from the source code and aids in updating.
|
|
||||||
|
|
||||||
You may pick any location for this directory using the `--root_dir`
|
|
||||||
option (abbreviated --root). If you don't pass this option, it will
|
|
||||||
default to `invokeai` in your home directory.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
configure_invokeai.py --root_dir ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
The script `configure_invokeai.py` will interactively guide you through the
|
|
||||||
process of downloading and installing the weights files needed for InvokeAI.
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
|
||||||
agreement that you have to agree to. The script will list the steps you need
|
|
||||||
to take to create an account on the site that hosts the weights files,
|
|
||||||
accept the agreement, and provide an access token that allows InvokeAI to
|
|
||||||
legally download and install the weights files.
|
|
||||||
|
|
||||||
If you get an error message about a module not being installed, check that
|
|
||||||
the `invokeai` environment is active and if not, repeat step 5.
|
|
||||||
|
|
||||||
Note that `configure_invokeai.py` and `invoke.py` should be installed
|
|
||||||
under your virtual environment directory and the system should find them
|
|
||||||
on the PATH. If this isn't working on your system, you can call the
|
|
||||||
scripts directory using `python scripts/configure_invokeai.py` and
|
|
||||||
`python scripts/invoke.py`.
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [here](050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
8. Run the command-line- or the web- interface:
|
|
||||||
|
|
||||||
Activate the environment (with `source invokeai/bin/activate`), and then
|
|
||||||
run the script `invoke.py`. If you selected a non-default location
|
|
||||||
for the runtime directory, please specify the path with the `--root_dir`
|
|
||||||
option (abbreviated below as `--root`):
|
|
||||||
|
|
||||||
!!! example ""
|
|
||||||
|
|
||||||
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
|
|
||||||
|
|
||||||
=== "CLI"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "local Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --web --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Public Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
If you choose to run the web interface, point your browser at
|
|
||||||
http://localhost:9090 in order to load the GUI.
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
|
|
||||||
|
|
||||||
9. Render away!
|
|
||||||
|
|
||||||
Browse the [features](../features/CLI.md) section to learn about all the things you
|
|
||||||
can do with InvokeAI.
|
|
||||||
|
|
||||||
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
|
||||||
card with the ROCm driver, you may have to wait for over a minute the first
|
|
||||||
time you try to generate an image. Fortunately, after the warm up period
|
|
||||||
rendering will be fast.
|
|
||||||
|
|
||||||
10. Subsequently, to relaunch the script, be sure to activate the virtual
|
|
||||||
environment with `source invokeai/bin/activate`, enter the `InvokeAI` directory, and then launch the invoke
|
|
||||||
script. If you forget to activate the 'invokeai' environment, the script
|
|
||||||
will fail with multiple `ModuleNotFound` errors.
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### Conda method
|
|
||||||
|
|
||||||
1. Check that your system meets the
|
|
||||||
[hardware requirements](index.md#Hardware_Requirements) and has the
|
|
||||||
appropriate GPU drivers installed. In particular, if you are a Linux user
|
|
||||||
with an AMD GPU installed, you may need to install the
|
|
||||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
|
||||||
|
|
||||||
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
|
|
||||||
of ROCm driver support on this platform.
|
|
||||||
|
|
||||||
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
|
|
||||||
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
|
|
||||||
information about the installed video card.
|
|
||||||
|
|
||||||
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
|
|
||||||
can skip this step.
|
|
||||||
|
|
||||||
2. You will need to install Anaconda3 and Git if they are not already
|
|
||||||
available. Use your operating system's preferred package manager, or
|
|
||||||
download the installers manually. You can find them here:
|
|
||||||
|
|
||||||
- [Anaconda3](https://www.anaconda.com/)
|
|
||||||
- [git](https://git-scm.com/downloads)
|
|
||||||
|
|
||||||
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
|
||||||
GitHub:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create an InvokeAI folder where you will follow the rest of the
|
|
||||||
steps.
|
|
||||||
|
|
||||||
4. Enter the newly-created InvokeAI folder:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd InvokeAI
|
|
||||||
```
|
|
||||||
|
|
||||||
From this step forward make sure that you are working in the InvokeAI
|
|
||||||
directory!
|
|
||||||
|
|
||||||
5. Select the appropriate environment file:
|
|
||||||
|
|
||||||
We have created a series of environment files suited for different operating
|
|
||||||
systems and GPU hardware. They are located in the
|
|
||||||
`environments-and-requirements` directory:
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||
| filename | OS |
|
|
||||||
| :----------------------: | :----------------------------: |
|
|
||||||
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
|
|
||||||
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
|
|
||||||
| environment-mac.yml | Macintosh |
|
|
||||||
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Choose the appropriate environment file for your system and link or copy it
|
|
||||||
to `environment.yml` in InvokeAI's top-level directory. To do so, run
|
|
||||||
the following command from the repository root:
|
|
||||||
|
|
||||||
!!! Example ""
|
|
||||||
|
|
||||||
=== "Macintosh and Linux"
|
|
||||||
|
|
||||||
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
When this is done, confirm that a file `environment.yml` has been linked in
|
|
||||||
the InvokeAI root directory and that it points to the correct file in the
|
|
||||||
`environments-and-requirements`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ls -la
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Windows"
|
|
||||||
|
|
||||||
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Afterwards verify that the file `environment.yml` has been created, either via the
|
|
||||||
explorer or by using the command `dir` from the terminal
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
dir
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
|
||||||
|
|
||||||
6. Create the conda environment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda env update
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create a new environment named `invokeai` and install all InvokeAI
|
|
||||||
dependencies into it. If something goes wrong you should take a look at
|
|
||||||
[troubleshooting](#troubleshooting).
|
|
||||||
|
|
||||||
7. Activate the `invokeai` environment:
|
|
||||||
|
|
||||||
In order to use the newly created environment you will first need to
|
|
||||||
activate it
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda activate invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
Your command-line prompt should change to indicate that `invokeai` is active
|
|
||||||
by prepending `(invokeai)`.
|
|
||||||
|
|
||||||
8. Set up the runtime directory
|
|
||||||
|
|
||||||
In this step you will initialize a runtime directory that will
|
|
||||||
contain the models, model config files, directory for textual
|
|
||||||
inversion embeddings, and your outputs. This keeps the runtime
|
|
||||||
directory separate from the source code and aids in updating.
|
|
||||||
|
|
||||||
You may pick any location for this directory using the `--root_dir`
|
|
||||||
option (abbreviated --root). If you don't pass this option, it will
|
|
||||||
default to `invokeai` in your home directory.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python scripts/configure_invokeai.py --root_dir ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
The script `configure_invokeai.py` will interactively guide you through the
|
|
||||||
process of downloading and installing the weights files needed for InvokeAI.
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
|
||||||
agreement that you have to agree to. The script will list the steps you need
|
|
||||||
to take to create an account on the site that hosts the weights files,
|
|
||||||
accept the agreement, and provide an access token that allows InvokeAI to
|
|
||||||
legally download and install the weights files.
|
|
||||||
|
|
||||||
If you get an error message about a module not being installed, check that
|
|
||||||
the `invokeai` environment is active and if not, repeat step 5.
|
|
||||||
|
|
||||||
Note that `configure_invokeai.py` and `invoke.py` should be
|
|
||||||
installed under your conda directory and the system should find
|
|
||||||
them automatically on the PATH. If this isn't working on your
|
|
||||||
system, you can call the scripts directory using `python
|
|
||||||
scripts/configure_invokeai.py` and `python scripts/invoke.py`.
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [here](050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
9. Run the command-line- or the web- interface:
|
|
||||||
|
|
||||||
Activate the environment (with `source invokeai/bin/activate`), and then
|
|
||||||
run the script `invoke.py`. If you selected a non-default location
|
|
||||||
for the runtime directory, please specify the path with the `--root_dir`
|
|
||||||
option (abbreviated below as `--root`):
|
|
||||||
|
|
||||||
!!! example ""
|
|
||||||
|
|
||||||
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
|
|
||||||
|
|
||||||
=== "CLI"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "local Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --web --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Public Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
If you choose to run the web interface, point your browser at
|
|
||||||
http://localhost:9090 in order to load the GUI.
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of your choice.
|
|
||||||
|
|
||||||
10. Render away!
|
|
||||||
|
|
||||||
Browse the [features](../features/CLI.md) section to learn about all the things you
|
|
||||||
can do with InvokeAI.
|
|
||||||
|
|
||||||
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
|
||||||
card with the ROCm driver, you may have to wait for over a minute the first
|
|
||||||
time you try to generate an image. Fortunately, after the warm up period
|
|
||||||
rendering will be fast.
|
|
||||||
|
|
||||||
11. Subsequently, to relaunch the script, be sure to run "conda activate
|
|
||||||
invokeai", enter the `InvokeAI` directory, and then launch the invoke
|
|
||||||
script. If you forget to activate the 'invokeai' environment, the script
|
|
||||||
will fail with multiple `ModuleNotFound` errors.
|
|
||||||
|
|
||||||
## Creating an "install" version of InvokeAI
|
|
||||||
|
|
||||||
If you wish you can install InvokeAI and all its dependencies in the
|
|
||||||
runtime directory. This allows you to delete the source code
|
|
||||||
repository and eliminates the need to provide `--root_dir` at startup
|
|
||||||
time. Note that this method only works with the PIP method.
|
|
||||||
|
|
||||||
1. Follow the instructions for the PIP install, but in step #2 put the
|
|
||||||
virtual environment into the runtime directory. For example, assuming the
|
|
||||||
runtime directory lives in `~/Programs/invokeai`, you'd run:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python -mvenv ~/Programs/invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
|
|
||||||
step.
|
|
||||||
|
|
||||||
3. Run one additional step while you are in the source code repository
|
|
||||||
directory `pip install .` (note the dot at the end).
|
|
||||||
|
|
||||||
4. That's all! Now, whenever you activate the virtual environment,
|
|
||||||
`invoke.py` will know where to look for the runtime directory without
|
|
||||||
needing a `--root_dir` argument. In addition, you can now move or
|
|
||||||
delete the source code repository entirely.
|
|
||||||
|
|
||||||
(Don't move the runtime directory!)
|
|
||||||
|
|
||||||
## Updating to newer versions of the script
|
|
||||||
|
|
||||||
This distribution is changing rapidly. If you used the `git clone` method
|
|
||||||
(step 5) to download the InvokeAI directory, then to update to the latest and
|
|
||||||
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git pull
|
|
||||||
conda env update
|
|
||||||
python scripts/configure_invokeai.py --skip-sd-weights #optional
|
|
||||||
```
|
|
||||||
|
|
||||||
This will bring your local copy into sync with the remote one. The last step may
|
|
||||||
be needed to take advantage of new features or released models. The
|
|
||||||
`--skip-sd-weights` flag will prevent the script from prompting you to download
|
|
||||||
the big Stable Diffusion weights files.
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
Here are some common issues and their suggested solutions.
|
|
||||||
|
|
||||||
### Conda
|
|
||||||
|
|
||||||
#### Conda fails before completing `conda update`
|
|
||||||
|
|
||||||
The usual source of these errors is a package incompatibility. While we have
|
|
||||||
tried to minimize these, over time packages get updated and sometimes introduce
|
|
||||||
incompatibilities.
|
|
||||||
|
|
||||||
We suggest that you search
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
|
|
||||||
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
|
||||||
|
|
||||||
You may also try to install the broken packages manually using PIP. To do this,
|
|
||||||
activate the `invokeai` environment, and run `pip install` with the name and
|
|
||||||
version of the package that is causing the incompatibility. For example:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install test-tube==0.7.5
|
|
||||||
```
|
|
||||||
|
|
||||||
You can keep doing this until all requirements are satisfied and the `invoke.py`
|
|
||||||
script runs without errors. Please report to
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
|
||||||
to work around the problem so that others can benefit from your investigation.
|
|
||||||
|
|
||||||
### Create Conda Environment fails on MacOS
|
|
||||||
|
|
||||||
If conda create environment fails with lmdb error, this is most likely caused by Clang.
|
|
||||||
Run brew config to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
|
|
||||||
Start by installing additional XCode command line tools, followed by brew install llvm.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
xcode-select --install
|
|
||||||
brew install llvm
|
|
||||||
```
|
|
||||||
|
|
||||||
If brew config has Clang installed, update to the latest llvm and try creating the environment again.
|
|
||||||
|
|
||||||
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
|
|
||||||
|
|
||||||
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
|
||||||
have linked to the correct environment file and run `conda update` again.
|
|
||||||
|
|
||||||
If the problem persists, a more extreme measure is to clear Conda's caches and
|
|
||||||
remove the `invokeai` environment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda deactivate
|
|
||||||
conda env remove -n invokeai
|
|
||||||
conda clean -a
|
|
||||||
conda update
|
|
||||||
```
|
|
||||||
|
|
||||||
This removes all cached library files, including ones that may have been
|
|
||||||
corrupted somehow. (This is not supposed to happen, but does anyway).
|
|
||||||
|
|
||||||
#### `invoke.py` crashes at a later stage
|
|
||||||
|
|
||||||
If the CLI or web site had been working ok, but something unexpected happens
|
|
||||||
later on during the session, you've encountered a code bug that is probably
|
|
||||||
unrelated to an install issue. Please search
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
|
|
||||||
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy)
|
|
||||||
|
|
||||||
#### My renders are running very slowly
|
|
||||||
|
|
||||||
You may have installed the wrong torch (machine learning) package, and the
|
|
||||||
system is running on CPU rather than the GPU. To check, look at the log messages
|
|
||||||
that appear when `invoke.py` is first starting up. One of the earlier lines
|
|
||||||
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
|
|
||||||
and on Macintoshes, it should say "mps". If instead the message says it is
|
|
||||||
running on "cpu", then you may need to install the correct torch library.
|
|
||||||
|
|
||||||
You may be able to fix this by installing a different torch library. Here are
|
|
||||||
the magic incantations for Conda and PIP.
|
|
||||||
|
|
||||||
!!! todo "For CUDA systems"
|
|
||||||
|
|
||||||
- conda
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
|
||||||
```
|
|
||||||
|
|
||||||
- pip
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! todo "For AMD systems"
|
|
||||||
|
|
||||||
- conda
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda activate invokeai
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
|
||||||
```
|
|
||||||
|
|
||||||
- pip
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
|
||||||
```
|
|
||||||
|
|
||||||
More information and troubleshooting tips can be found at https://pytorch.org.
|
|
||||||
@@ -1,115 +0,0 @@
|
|||||||
---
|
|
||||||
title: Installing PyPatchMatch
|
|
||||||
---
|
|
||||||
|
|
||||||
# :octicons-paintbrush-16: Installing PyPatchMatch
|
|
||||||
|
|
||||||
pypatchmatch is a Python module for inpainting images. It is not
|
|
||||||
needed to run InvokeAI, but it greatly improves the quality of
|
|
||||||
inpainting and outpainting and is recommended.
|
|
||||||
|
|
||||||
Unfortunately, it is a C++ optimized module and installation
|
|
||||||
can be somewhat challenging. This guide leads you through the steps.
|
|
||||||
|
|
||||||
## Windows
|
|
||||||
|
|
||||||
You're in luck! On Windows platforms PyPatchMatch will install
|
|
||||||
automatically with no extra intervention.
|
|
||||||
|
|
||||||
## Macintosh
|
|
||||||
|
|
||||||
PyPatchMatch is not currently supported, but the team is working on
|
|
||||||
it.
|
|
||||||
|
|
||||||
## Linux
|
|
||||||
|
|
||||||
Prior to installing PyPatchMatch, you need to take the following
|
|
||||||
steps:
|
|
||||||
|
|
||||||
### Debian Based Distros
|
|
||||||
|
|
||||||
|
|
||||||
1. Install the `build-essential` tools:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install build-essential
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Install `opencv`:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo apt install python3-opencv libopencv-dev
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Fix the naming of the `opencv` package configuration file:
|
|
||||||
|
|
||||||
```
|
|
||||||
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
|
|
||||||
ln -sf opencv4.pc opencv.pc
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Activate the environment you use for invokeai, either with
|
|
||||||
`conda` or with a virtual environment.
|
|
||||||
|
|
||||||
5. Do a "develop" install of pypatchmatch:
|
|
||||||
|
|
||||||
```
|
|
||||||
pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
|
|
||||||
```
|
|
||||||
|
|
||||||
6. Confirm that pypatchmatch is installed.
|
|
||||||
At the command-line prompt enter `python`, and
|
|
||||||
then at the `>>>` line type `from patchmatch import patch_match`:
|
|
||||||
It should look like the following:
|
|
||||||
|
|
||||||
```
|
|
||||||
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
|
|
||||||
[GCC 9.3.0] on linux
|
|
||||||
Type "help", "copyright", "credits" or "license" for more information.
|
|
||||||
>>> from patchmatch import patch_match
|
|
||||||
Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
|
|
||||||
rm -rf build/obj libpatchmatch.so
|
|
||||||
mkdir: created directory 'build/obj'
|
|
||||||
mkdir: created directory 'build/obj/csrc/'
|
|
||||||
[dep] csrc/masked_image.cpp ...
|
|
||||||
[dep] csrc/nnf.cpp ...
|
|
||||||
[dep] csrc/inpaint.cpp ...
|
|
||||||
[dep] csrc/pyinterface.cpp ...
|
|
||||||
[CC] csrc/pyinterface.cpp ...
|
|
||||||
[CC] csrc/inpaint.cpp ...
|
|
||||||
[CC] csrc/nnf.cpp ...
|
|
||||||
[CC] csrc/masked_image.cpp ...
|
|
||||||
[link] libpatchmatch.so ...
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Arch Based Distros
|
|
||||||
|
|
||||||
1. Install the `base-devel` package:
|
|
||||||
```
|
|
||||||
sudo pacman -Syu
|
|
||||||
sudo pacman -S --needed base-devel
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Install `opencv`:
|
|
||||||
```
|
|
||||||
sudo pacman -S opencv
|
|
||||||
```
|
|
||||||
or for CUDA support
|
|
||||||
```
|
|
||||||
sudo pacman -S opencv-cuda
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Fix the naming of the `opencv` package configuration file:
|
|
||||||
```
|
|
||||||
cd /usr/lib/pkgconfig/
|
|
||||||
ln -sf opencv4.pc opencv.pc
|
|
||||||
```
|
|
||||||
|
|
||||||
**Next, Follow Steps 4-6 from the Debian Section above**
|
|
||||||
|
|
||||||
|
|
||||||
If you see no errors, then you're ready to go!
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,89 +0,0 @@
|
|||||||
---
|
|
||||||
title: build binary installers
|
|
||||||
---
|
|
||||||
|
|
||||||
# :simple-buildkite: How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)
|
|
||||||
|
|
||||||
## 1. Ensure `installers/requirements.in` is correct
|
|
||||||
|
|
||||||
and up to date on the branch to be installed.
|
|
||||||
|
|
||||||
## <a name="step-2"></a> 2. Run `pip-compile` on each platform.
|
|
||||||
|
|
||||||
On each target platform, in the branch that is to be installed, and
|
|
||||||
inside the InvokeAI git root folder, run the following commands:
|
|
||||||
|
|
||||||
```commandline
|
|
||||||
conda activate invokeai # or however you activate python
|
|
||||||
pip install pip-tools
|
|
||||||
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
|
|
||||||
```
|
|
||||||
where `<reqsfile>.txt` is whichever of
|
|
||||||
```commandline
|
|
||||||
py3.10-darwin-arm64-mps-reqs.txt
|
|
||||||
py3.10-darwin-x86_64-reqs.txt
|
|
||||||
py3.10-linux-x86_64-cuda-reqs.txt
|
|
||||||
py3.10-windows-x86_64-cuda-reqs.txt
|
|
||||||
```
|
|
||||||
matches the current OS and architecture.
|
|
||||||
> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.
|
|
||||||
|
|
||||||
## <a name="step-3"></a> 3. Set github repository and branch
|
|
||||||
|
|
||||||
Once all reqs files have been collected and committed **to the branch
|
|
||||||
to be installed**, edit `binary_installer/install.sh.in` and `binary_installer/install.bat.in` so that `RELEASE_URL`
|
|
||||||
and `RELEASE_SOURCEBALL` point to the github repo and branch that is
|
|
||||||
to be installed.
|
|
||||||
|
|
||||||
For example, to install `main` branch of `InvokeAI`, they should be
|
|
||||||
set as follows:
|
|
||||||
|
|
||||||
`install.sh.in`:
|
|
||||||
```commandline
|
|
||||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
|
||||||
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
`install.bat.in`:
|
|
||||||
```commandline
|
|
||||||
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
|
||||||
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
Or, to install `damians-cool-feature` branch of `damian0815`, set them
|
|
||||||
as follows:
|
|
||||||
|
|
||||||
`install.sh.in`:
|
|
||||||
```commandline
|
|
||||||
RELEASE_URL=https://github.com/damian0815/InvokeAI
|
|
||||||
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
`install.bat.in`:
|
|
||||||
```commandline
|
|
||||||
set RELEASE_URL=https://github.com/damian0815/InvokeAI
|
|
||||||
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
The branch and repo specified here **must** contain the correct reqs
|
|
||||||
files. The installer zip files **do not** contain requirements files,
|
|
||||||
they are pulled from the specified branch during the installation
|
|
||||||
process.
|
|
||||||
|
|
||||||
## 4. Create zip files.
|
|
||||||
|
|
||||||
cd into the `installers/` folder and run
|
|
||||||
`./create_installers.sh`. This will create
|
|
||||||
`InvokeAI-mac_on_<branch>.zip`,
|
|
||||||
`InvokeAI-windows_on_<branch>.zip` and
|
|
||||||
`InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.
|
|
||||||
|
|
||||||
These zips will continue to function as installers for all future
|
|
||||||
pushes to those branches, as long as necessary changes to
|
|
||||||
`requirements.in` are propagated in a timely manner to the
|
|
||||||
`py3.10-*-reqs.txt` files using pip-compile as outlined in [step
|
|
||||||
2](#step-2).
|
|
||||||
|
|
||||||
To actually install, users should unzip the appropriate zip file into an empty
|
|
||||||
folder and run `install.sh` on macOS/Linux or `install.bat` on
|
|
||||||
Windows.
|
|
||||||
@@ -56,7 +56,7 @@ unofficial Stable Diffusion models and where they can be obtained.
|
|||||||
|
|
||||||
There are three ways to install weights files:
|
There are three ways to install weights files:
|
||||||
|
|
||||||
1. During InvokeAI installation, the `configure_invokeai.py` script can download
|
1. During InvokeAI installation, the `preload_models.py` script can download
|
||||||
them for you.
|
them for you.
|
||||||
|
|
||||||
2. You can use the command-line interface (CLI) to import, configure and modify
|
2. You can use the command-line interface (CLI) to import, configure and modify
|
||||||
@@ -65,13 +65,13 @@ There are three ways to install weights files:
|
|||||||
3. You can download the files manually and add the appropriate entries to
|
3. You can download the files manually and add the appropriate entries to
|
||||||
`models.yaml`.
|
`models.yaml`.
|
||||||
|
|
||||||
### Installation via `configure_invokeai.py`
|
### Installation via `preload_models.py`
|
||||||
|
|
||||||
This is the most automatic way. Run `scripts/configure_invokeai.py` from the
|
This is the most automatic way. Run `scripts/preload_models.py` from the
|
||||||
console. It will ask you to select which models to download and lead you through
|
console. It will ask you to select which models to download and lead you through
|
||||||
the steps of setting up a Hugging Face account if you haven't done so already.
|
the steps of setting up a Hugging Face account if you haven't done so already.
|
||||||
|
|
||||||
To start, run `python scripts/configure_invokeai.py` from within the InvokeAI:
|
To start, run `python scripts/preload_models.py` from within the InvokeAI:
|
||||||
directory
|
directory
|
||||||
|
|
||||||
!!! example ""
|
!!! example ""
|
||||||
@@ -100,7 +100,7 @@ directory
|
|||||||
The original Stable Diffusion version 1.4 weight file (4.27 GB)
|
The original Stable Diffusion version 1.4 weight file (4.27 GB)
|
||||||
Download? [n] n
|
Download? [n] n
|
||||||
[4] waifu-diffusion-1.3:
|
[4] waifu-diffusion-1.3:
|
||||||
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
|
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27)
|
||||||
Download? [n] y
|
Download? [n] y
|
||||||
[5] ft-mse-improved-autoencoder-840000:
|
[5] ft-mse-improved-autoencoder-840000:
|
||||||
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
|
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB) (recommended)
|
||||||
@@ -162,12 +162,6 @@ the command-line client's `!import_model` command.
|
|||||||
Type a bit of the path name and hit ++tab++ in order to get a choice of
|
Type a bit of the path name and hit ++tab++ in order to get a choice of
|
||||||
possible completions.
|
possible completions.
|
||||||
|
|
||||||
!!! tip "on Windows, you can drag model files onto the command-line"
|
|
||||||
|
|
||||||
Once you have typed in `!import_model `, you can drag the model `.ckpt` file
|
|
||||||
onto the command-line to insert the model path. This way, you don't need to
|
|
||||||
type it or copy/paste.
|
|
||||||
|
|
||||||
4. Follow the wizard's instructions to complete installation as shown in the
|
4. Follow the wizard's instructions to complete installation as shown in the
|
||||||
example here:
|
example here:
|
||||||
|
|
||||||
@@ -244,7 +238,7 @@ arabian-nights-1.0:
|
|||||||
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
|
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
|
||||||
| description | Any description that you want to add to the model to remind you what it is. |
|
| description | Any description that you want to add to the model to remind you what it is. |
|
||||||
| weights | Relative path to the .ckpt weights file for this model. |
|
| weights | Relative path to the .ckpt weights file for this model. |
|
||||||
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
|
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already inclued in the InvokeAI distribution and is configured automatically for you by the `preload_models.py` script. |
|
||||||
| vae | If you want to add a VAE file to the model, then enter its path here. |
|
| vae | If you want to add a VAE file to the model, then enter its path here. |
|
||||||
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
|
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
|
||||||
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
010_INSTALL_AUTOMATED.md
|
|
||||||
@@ -1,12 +1,12 @@
|
|||||||
---
|
---
|
||||||
title: Installing with Docker
|
title: Docker
|
||||||
---
|
---
|
||||||
|
|
||||||
# :fontawesome-brands-docker: Docker
|
# :fontawesome-brands-docker: Docker
|
||||||
|
|
||||||
!!! warning "For end users"
|
!!! warning "For end users"
|
||||||
|
|
||||||
We highly recommend to Install InvokeAI locally using [these instructions](index.md)
|
We highly recommend to Install InvokeAI locally using [these instructions](index.md)"
|
||||||
|
|
||||||
!!! tip "For developers"
|
!!! tip "For developers"
|
||||||
|
|
||||||
@@ -16,10 +16,6 @@ title: Installing with Docker
|
|||||||
|
|
||||||
For general use, install locally to leverage your machine's GPU.
|
For general use, install locally to leverage your machine's GPU.
|
||||||
|
|
||||||
!!! tip "For running on a cloud instance/service"
|
|
||||||
|
|
||||||
Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
|
|
||||||
|
|
||||||
## Why containers?
|
## Why containers?
|
||||||
|
|
||||||
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
||||||
@@ -40,7 +36,7 @@ development purposes it's fine. Once you're done with development tasks on your
|
|||||||
laptop you can build for the target platform and architecture and deploy to
|
laptop you can build for the target platform and architecture and deploy to
|
||||||
another environment with NVIDIA GPUs on-premises or in the cloud.
|
another environment with NVIDIA GPUs on-premises or in the cloud.
|
||||||
|
|
||||||
## Installation in a Linux container (desktop)
|
## Installation on a Linux container
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
@@ -76,20 +72,14 @@ created in the last step.
|
|||||||
|
|
||||||
Some Suggestions of variables you may want to change besides the Token:
|
Some Suggestions of variables you may want to change besides the Token:
|
||||||
|
|
||||||
<figure markdown>
|
| Environment-Variable | Default value | Description |
|
||||||
|
| ------------------------- | ----------------------------- | ---------------------------------------------------------------------------- |
|
||||||
| Environment-Variable | Default value | Description |
|
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without you can't get the checkpoint |
|
||||||
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
|
| `ARCH` | x86_64 | if you are using a ARM based CPU |
|
||||||
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
| `INVOKEAI_TAG` | invokeai-x86_64 | the Container Repository / Tag which will be used |
|
||||||
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will used as the container repository/image name |
|
| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml | since environment.yml wouldn't work with aarch |
|
||||||
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
| `INVOKEAI_GIT` | invoke-ai/InvokeAI | the repository to use |
|
||||||
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
|
| `INVOKEAI_BRANCH` | main | the branch to checkout |
|
||||||
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
|
|
||||||
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
|
|
||||||
| `CONTAINER_FLAVOR` | cuda | the flavor of the image, which can be changed if you build f.e. with amd requirements file. |
|
|
||||||
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
#### Build the Image
|
#### Build the Image
|
||||||
|
|
||||||
@@ -116,115 +106,15 @@ When used without arguments, the container will start the webserver and provide
|
|||||||
you the link to open it. But if you want to use some other parameters you can
|
you the link to open it. But if you want to use some other parameters you can
|
||||||
also do so.
|
also do so.
|
||||||
|
|
||||||
!!! example "run script example"
|
!!! example ""
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
|
./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
|
The output folder is located on the volume which is also used to store the model.
|
||||||
|
|
||||||
Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)
|
Find out more about available CLI-Parameters at [features/CLI.md](../features/CLI.md/#arguments)
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Running the container on your GPU
|
|
||||||
|
|
||||||
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
|
|
||||||
environment variable to enable GPU usage and have the process run much faster:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
GPU_FLAGS=all ./docker-build/run.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
This passes the `--gpus all` to docker and uses the GPU.
|
|
||||||
|
|
||||||
If you don't have a GPU (or your host is not yet setup to use it) you will see a message like this:
|
|
||||||
|
|
||||||
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
|
|
||||||
|
|
||||||
You can use the full set of GPU combinations documented here:
|
|
||||||
|
|
||||||
https://docs.docker.com/config/containers/resource_constraints/#gpu
|
|
||||||
|
|
||||||
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.
|
|
||||||
|
|
||||||
## Running InvokeAI in the cloud with Docker
|
|
||||||
|
|
||||||
We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
|
|
||||||
|
|
||||||
An advantage of this method is that it does not need any local setup or additional dependencies.
|
|
||||||
|
|
||||||
See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
|
|
||||||
|
|
||||||
### Prerequisites
|
|
||||||
|
|
||||||
- a `docker` runtime
|
|
||||||
- `make` (optional but helps for convenience)
|
|
||||||
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
|
|
||||||
|
|
||||||
Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
|
|
||||||
|
|
||||||
### Building and running the image locally
|
|
||||||
|
|
||||||
1. Clone this repo and `cd docker-build`
|
|
||||||
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
|
|
||||||
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
|
|
||||||
- `make configure` (This does *not* require a GPU-capable system)
|
|
||||||
- this will create a local cache of models and configs (a.k.a the _runtime dir_)
|
|
||||||
- enter your Huggingface token when prompted
|
|
||||||
1. `make web`
|
|
||||||
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
|
|
||||||
|
|
||||||
To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
|
|
||||||
|
|
||||||
#### Building and running without `make`
|
|
||||||
|
|
||||||
(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
|
|
||||||
|
|
||||||
!!! example "Build the image and configure the runtime directory"
|
|
||||||
```Shell
|
|
||||||
cd docker-build
|
|
||||||
|
|
||||||
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
|
|
||||||
|
|
||||||
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! example "Run the web server"
|
|
||||||
```Shell
|
|
||||||
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
Access the Web UI at http://localhost:9090
|
|
||||||
|
|
||||||
!!! example "Run the InvokeAI interactive CLI"
|
|
||||||
```
|
|
||||||
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Running the image in the cloud
|
|
||||||
|
|
||||||
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
|
|
||||||
|
|
||||||
1. build this image either in the cloud (you'll need to pull the repo), or locally
|
|
||||||
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
|
|
||||||
1. `docker pull` it on your cloud instance
|
|
||||||
1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
|
|
||||||
1. use either one of the `docker run` commands above, substituting the image name for your own image.
|
|
||||||
|
|
||||||
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
|
|
||||||
|
|
||||||
The template's `README` provides ample detail, but at a high level, the process is as follows:
|
|
||||||
|
|
||||||
1. create a pod using this Docker image
|
|
||||||
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
|
|
||||||
1. Run the pod with `sleep infinity` as the Docker command
|
|
||||||
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
|
|
||||||
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
|
|
||||||
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
|
|
||||||
|
|
||||||
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -240,12 +130,12 @@ Running on other cloud providers such as Vast.ai will likely work in a similar f
|
|||||||
If you're on a **Linux container** the `invoke` script is **automatically
|
If you're on a **Linux container** the `invoke` script is **automatically
|
||||||
started** and the output dir set to the Docker volume you created earlier.
|
started** and the output dir set to the Docker volume you created earlier.
|
||||||
|
|
||||||
If you're **directly on macOS follow these startup instructions**.
|
If you're **directly on macOS follow these startup instructions**.
|
||||||
With the Conda environment activated (`conda activate ldm`), run the interactive
|
With the Conda environment activated (`conda activate ldm`), run the interactive
|
||||||
interface that combines the functionality of the original scripts `txt2img` and
|
interface that combines the functionality of the original scripts `txt2img` and
|
||||||
`img2img`:
|
`img2img`:
|
||||||
Use the more accurate but VRAM-intensive full precision math because
|
Use the more accurate but VRAM-intensive full precision math because
|
||||||
half-precision requires autocast and won't work.
|
half-precision requires autocast and won't work.
|
||||||
By default the images are saved in `outputs/img-samples/`.
|
By default the images are saved in `outputs/img-samples/`.
|
||||||
|
|
||||||
```Shell
|
```Shell
|
||||||
@@ -262,8 +152,8 @@ invoke> q
|
|||||||
### Text to Image
|
### Text to Image
|
||||||
|
|
||||||
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
|
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
|
||||||
image. This will let you know that everything is set up correctly.
|
image. This will let you know that everything is set up correctly.
|
||||||
Then increase steps to 100 or more for good (but slower) results.
|
Then increase steps to 100 or more for good (but slower) results.
|
||||||
The prompt can be in quotes or not.
|
The prompt can be in quotes or not.
|
||||||
|
|
||||||
```Shell
|
```Shell
|
||||||
@@ -277,8 +167,8 @@ You'll need to experiment to see if face restoration is making it better or
|
|||||||
worse for your specific prompt.
|
worse for your specific prompt.
|
||||||
|
|
||||||
If you're on a container the output is set to the Docker volume. You can copy it
|
If you're on a container the output is set to the Docker volume. You can copy it
|
||||||
wherever you want.
|
wherever you want.
|
||||||
You can download it from the Docker Desktop app, Volumes, my-vol, data.
|
You can download it from the Docker Desktop app, Volumes, my-vol, data.
|
||||||
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
|
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
|
||||||
`*.png` so you'll need to specify the image file name.
|
`*.png` so you'll need to specify the image file name.
|
||||||
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
---
|
---
|
||||||
title: InvokeAI Binary Installer
|
title: InvokeAI Installer
|
||||||
---
|
---
|
||||||
|
|
||||||
The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
|
The InvokeAI installer is a shell script that will install InvokeAI onto a stock
|
||||||
computer running recent versions of Linux, MacOSX or Windows. It will leave you
|
computer running recent versions of Linux, MacOSX or Windows. It will leave you
|
||||||
with a version that runs a stable version of InvokeAI. When a new version of
|
with a version that runs a stable version of InvokeAI. When a new version of
|
||||||
InvokeAI is released, you will download and reinstall the new version.
|
InvokeAI is released, you will download and reinstall the new version.
|
||||||
@@ -10,16 +10,7 @@ InvokeAI is released, you will download and reinstall the new version.
|
|||||||
If you wish to tinker with unreleased versions of InvokeAI that introduce
|
If you wish to tinker with unreleased versions of InvokeAI that introduce
|
||||||
potentially unstable new features, you should consider using the
|
potentially unstable new features, you should consider using the
|
||||||
[source installer](INSTALL_SOURCE.md) or one of the
|
[source installer](INSTALL_SOURCE.md) or one of the
|
||||||
[manual install](../020_INSTALL_MANUAL.md) methods.
|
[manual install](INSTALL_MANUAL.md) methods.
|
||||||
|
|
||||||
**Important Caveats**
|
|
||||||
- This script does not support AMD GPUs. For Linux AMD support,
|
|
||||||
please use the manual or source code installer methods.
|
|
||||||
|
|
||||||
- This script has difficulty on some Macintosh machines
|
|
||||||
that have previously been used for Python development due to
|
|
||||||
conflicting development tools versions. Mac developers may wish
|
|
||||||
to try the source code installer or one of the manual methods instead.
|
|
||||||
|
|
||||||
!!! todo
|
!!! todo
|
||||||
|
|
||||||
@@ -36,7 +27,7 @@ recommended model weights files.
|
|||||||
|
|
||||||
1. Download the
|
1. Download the
|
||||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
|
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
|
||||||
InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`
|
InvokeAI's installer for your platform
|
||||||
|
|
||||||
2. Place the downloaded package someplace where you have plenty of HDD space,
|
2. Place the downloaded package someplace where you have plenty of HDD space,
|
||||||
and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)
|
and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)
|
||||||
27
docs/installation/INSTALL_JUPYTER.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
title: Running InvokeAI on Google Colab using a Jupyter Notebook
|
||||||
|
---
|
||||||
|
|
||||||
|
# THIS NEEDS TO BE FLESHED OUT
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
We have a [Jupyter
|
||||||
|
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
|
||||||
|
with cell-by-cell installation steps. It will download the code in
|
||||||
|
this repo as one of the steps, so instead of cloning this repo, simply
|
||||||
|
download the notebook from the link above and load it up in VSCode
|
||||||
|
(with the appropriate extensions installed)/Jupyter/JupyterLab and
|
||||||
|
start running the cells one-by-one.
|
||||||
|
|
||||||
|
!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"
|
||||||
|
|
||||||
|
## Walkthrough
|
||||||
|
|
||||||
|
## Updating to newer versions
|
||||||
|
|
||||||
|
### Updating the stable version
|
||||||
|
|
||||||
|
### Updating to the development version
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
@@ -8,7 +8,7 @@ title: Manual Installation
|
|||||||
|
|
||||||
!!! warning "This is for advanced Users"
|
!!! warning "This is for advanced Users"
|
||||||
|
|
||||||
who are already experienced with using conda or pip
|
who are already expirienced with using conda or pip
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
@@ -121,8 +121,8 @@ command-line completion.
|
|||||||
dir
|
dir
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
||||||
|
|
||||||
6. Create the conda environment:
|
6. Create the conda environment:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -155,10 +155,10 @@ command-line completion.
|
|||||||
process for this is described in [here](INSTALLING_MODELS.md).
|
process for this is described in [here](INSTALLING_MODELS.md).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python scripts/configure_invokeai.py
|
python scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
The script `configure_invokeai.py` will interactively guide you through the
|
The script `preload_models.py` will interactively guide you through the
|
||||||
process of downloading and installing the weights files needed for InvokeAI.
|
process of downloading and installing the weights files needed for InvokeAI.
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
Note that the main Stable Diffusion weights file is protected by a license
|
||||||
agreement that you have to agree to. The script will list the steps you need
|
agreement that you have to agree to. The script will list the steps you need
|
||||||
@@ -220,7 +220,7 @@ greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
|||||||
```bash
|
```bash
|
||||||
git pull
|
git pull
|
||||||
conda env update
|
conda env update
|
||||||
python scripts/configure_invokeai.py --no-interactive #optional
|
python scripts/preload_models.py --no-interactive #optional
|
||||||
```
|
```
|
||||||
|
|
||||||
This will bring your local copy into sync with the remote one. The last step may
|
This will bring your local copy into sync with the remote one. The last step may
|
||||||
@@ -346,20 +346,7 @@ script runs without errors. Please report to
|
|||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
||||||
to work around the problem so that others can benefit from your investigation.
|
to work around the problem so that others can benefit from your investigation.
|
||||||
|
|
||||||
### Create Conda Environment fails on MacOS
|
#### `preload_models.py` or `invoke.py` crashes at an early stage
|
||||||
|
|
||||||
If conda create environment fails with lmdb error, this is most likely caused by Clang.
|
|
||||||
Run brew config to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
|
|
||||||
Start by installing additional XCode command line tools, followed by brew install llvm.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
xcode-select --install
|
|
||||||
brew install llvm
|
|
||||||
```
|
|
||||||
|
|
||||||
If brew config has Clang installed, update to the latest llvm and try creating the environment again.
|
|
||||||
|
|
||||||
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
|
|
||||||
|
|
||||||
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
||||||
have linked to the correct environment file and run `conda update` again.
|
have linked to the correct environment file and run `conda update` again.
|
||||||
|
|||||||
156
docs/installation/INSTALL_SOURCE.md
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
---
|
||||||
|
title: Source Installer
|
||||||
|
---
|
||||||
|
|
||||||
|
# The InvokeAI Source Installer
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
The source installer is a shell script that attempts to automate every step
|
||||||
|
needed to install and run InvokeAI on a stock computer running recent versions
|
||||||
|
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
||||||
|
version of InvokeAI with the option to upgrade to experimental versions later.
|
||||||
|
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md).
|
||||||
|
|
||||||
|
Before you begin, make sure that you meet the
|
||||||
|
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
|
||||||
|
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
|
||||||
|
installed, you may need to install the
|
||||||
|
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||||
|
|
||||||
|
Installation requires roughly 18G of free disk space to load the libraries and
|
||||||
|
recommended model weights files.
|
||||||
|
|
||||||
|
## Walk through
|
||||||
|
|
||||||
|
Though there are multiple steps, there really is only one click involved to kick
|
||||||
|
off the process.
|
||||||
|
|
||||||
|
1. The source installer is distributed in ZIP files. Go to the
|
||||||
|
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
||||||
|
look for a series of files named:
|
||||||
|
|
||||||
|
- invokeAI-src-installer-mac.zip
|
||||||
|
- invokeAI-src-installer-windows.zip
|
||||||
|
- invokeAI-src-installer-linux.zip
|
||||||
|
|
||||||
|
Download the one that is appropriate for your operating system.
|
||||||
|
|
||||||
|
2. Unpack the zip file into a directory that has at least 18G of free space. Do
|
||||||
|
_not_ unpack into a directory that has an earlier version of InvokeAI.
|
||||||
|
|
||||||
|
This will create a new directory named "InvokeAI". This example shows how
|
||||||
|
this would look using the `unzip` command-line tool, but you may use any
|
||||||
|
graphical or command-line Zip extractor:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> unzip invokeAI-windows.zip
|
||||||
|
Archive: C:\Linco\Downloads\invokeAI-windows.zip
|
||||||
|
creating: invokeAI\
|
||||||
|
inflating: invokeAI\install.bat
|
||||||
|
inflating: invokeAI\readme.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
3. If you are using a desktop GUI, double-click the installer file. It will be
|
||||||
|
named `install.bat` on Windows systems and `install.sh` on Linux and
|
||||||
|
Macintosh systems.
|
||||||
|
|
||||||
|
4. Alternatively, from the command line, run the shell script or .bat file:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> cd invokeAI
|
||||||
|
C:\Documents\Linco\invokeAI> install.bat
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Sit back and let the install script work. It will install various binary
|
||||||
|
requirements including Conda, Git and Python, then download the current
|
||||||
|
InvokeAI code and install it along with its dependencies.
|
||||||
|
|
||||||
|
6. After installation completes, the installer will launch a script called
|
||||||
|
`preload_models.py`, which will guide you through the first-time process of
|
||||||
|
selecting one or more Stable Diffusion model weights files, downloading and
|
||||||
|
configuring them.
|
||||||
|
|
||||||
|
Note that the main Stable Diffusion weights file is protected by a license
|
||||||
|
agreement that you must agree to in order to use. The script will list the
|
||||||
|
steps you need to take to create an account on the official site that hosts
|
||||||
|
the weights files, accept the agreement, and provide an access token that
|
||||||
|
allows InvokeAI to legally download and install the weights files.
|
||||||
|
|
||||||
|
If you have already downloaded the weights file(s) for another Stable
|
||||||
|
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||||
|
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||||
|
process for this is described in [Installing Models](INSTALLING_MODELS.md).
|
||||||
|
|
||||||
|
7. The script will now exit and you'll be ready to generate some images. The
|
||||||
|
invokeAI directory will contain numerous files. Look for a shell script
|
||||||
|
named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
|
||||||
|
by double-clicking it or typing its name at the command-line:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> cd invokeAI
|
||||||
|
C:\Documents\Linco\invokeAI> invoke.bat
|
||||||
|
```
|
||||||
|
|
||||||
|
The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
|
||||||
|
the command-line interface, or (2) the web GUI. If you start the latter, you can
|
||||||
|
load the user interface by pointing your browser at http://localhost:9090.
|
||||||
|
|
||||||
|
The `invoke` script also offers you a third option labeled "open the developer
|
||||||
|
console". If you choose this option, you will be dropped into a command-line
|
||||||
|
interface in which you can run python commands directly, access developer tools,
|
||||||
|
and launch InvokeAI with customized options. To do the latter, you would launch
|
||||||
|
the script `scripts/invoke.py` as shown in this example:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
python scripts/invoke.py --web --max_load_models=3 \
|
||||||
|
--model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
|
||||||
|
```
|
||||||
|
|
||||||
|
These options are described in detail in the
|
||||||
|
[Command-Line Interface](../features/CLI.md) documentation.
|
||||||
|
|
||||||
|
## Updating to newer versions
|
||||||
|
|
||||||
|
This section describes how to update InvokeAI to new versions of the software.
|
||||||
|
|
||||||
|
### Updating the stable version
|
||||||
|
|
||||||
|
This distribution is changing rapidly, and we add new features on a daily basis.
|
||||||
|
To update to the latest released version (recommended), run the `update.sh`
|
||||||
|
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
||||||
|
release and re-run the `preload_models` script to download any updated models
|
||||||
|
files that may be needed. You can also use this to add additional models that
|
||||||
|
you did not select at installation time.
|
||||||
|
|
||||||
|
### Updating to the development version
|
||||||
|
|
||||||
|
There may be times that there is a feature in the `development` branch of
|
||||||
|
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
|
||||||
|
corrects an annoying bug. To do this, you will use the developer's console.
|
||||||
|
|
||||||
|
From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
|
||||||
|
`invoke.bat` (Windows) and select option (3) to open the developer's console.
|
||||||
|
Then run the following command to get the `development` branch:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout development
|
||||||
|
git pull
|
||||||
|
conda env update
|
||||||
|
```
|
||||||
|
|
||||||
|
You can now close the developer console and run `invoke` as before. If you get
|
||||||
|
complaints about missing models, then you may need to do the additional step of
|
||||||
|
running `preload_models.py`. This happens relatively infrequently. To do this,
|
||||||
|
simply open up the developer's console again and type
|
||||||
|
`python scripts/preload_models.py`.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
If you run into problems during or after installation, the InvokeAI team is
|
||||||
|
available to help you. Either create an
|
||||||
|
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
||||||
|
make a request for help on the "bugs-and-support" channel of our
|
||||||
|
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
||||||
|
organization, but typically somebody will be available to help you within 24
|
||||||
|
hours, and often much sooner.
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
---
|
|
||||||
title: Running InvokeAI on Google Colab using a Jupyter Notebook
|
|
||||||
---
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
We have a [Jupyter
|
|
||||||
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
|
|
||||||
with cell-by-cell installation steps. It will download the code in
|
|
||||||
this repo as one of the steps, so instead of cloning this repo, simply
|
|
||||||
download the notebook from the link above and load it up in VSCode
|
|
||||||
(with the appropriate extensions installed)/Jupyter/JupyterLab and
|
|
||||||
start running the cells one-by-one.
|
|
||||||
|
|
||||||
!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"
|
|
||||||
|
|
||||||
## Running Online On Google Colaboratory
|
|
||||||
[](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
|
|
||||||
|
|
||||||
## Running Locally (Cloning)
|
|
||||||
|
|
||||||
1. Install the Jupyter Notebook python library (one-time):
|
|
||||||
pip install jupyter
|
|
||||||
|
|
||||||
2. Clone the InvokeAI repository:
|
|
||||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
|
||||||
cd invoke-ai
|
|
||||||
3. Create a virtual environment using conda:
|
|
||||||
conda create -n invoke jupyter
|
|
||||||
4. Activate the environment and start the Jupyter notebook:
|
|
||||||
conda activate invoke
|
|
||||||
jupyter notebook
|
|
||||||
@@ -1,225 +0,0 @@
|
|||||||
---
|
|
||||||
title: Source Installer
|
|
||||||
---
|
|
||||||
|
|
||||||
# The InvokeAI Source Installer
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
The source installer is a shell script that attempts to automate every step
|
|
||||||
needed to install and run InvokeAI on a stock computer running recent versions
|
|
||||||
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
|
||||||
version of InvokeAI with the option to upgrade to experimental versions later.
|
|
||||||
|
|
||||||
Before you begin, make sure that you meet the
|
|
||||||
[hardware requirements](../../index.md#hardware-requirements) and have the appropriate
|
|
||||||
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
|
|
||||||
installed, you may need to install the
|
|
||||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
|
||||||
|
|
||||||
Installation requires roughly 18G of free disk space to load the libraries and
|
|
||||||
recommended model weights files.
|
|
||||||
|
|
||||||
## Walk through
|
|
||||||
|
|
||||||
Though there are multiple steps, there really is only one click involved to kick
|
|
||||||
off the process.
|
|
||||||
|
|
||||||
1. The source installer is distributed in ZIP files. Go to the
|
|
||||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
|
||||||
look for a series of files named:
|
|
||||||
|
|
||||||
- [invokeAI-src-installer-2.2.3-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-mac.zip)
|
|
||||||
- [invokeAI-src-installer-2.2.3-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-windows.zip)
|
|
||||||
- [invokeAI-src-installer-2.2.3-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-linux.zip)
|
|
||||||
|
|
||||||
Download the one that is appropriate for your operating system.
|
|
||||||
|
|
||||||
2. Unpack the zip file into a directory that has at least 18G of free space. Do
|
|
||||||
_not_ unpack into a directory that has an earlier version of InvokeAI.
|
|
||||||
|
|
||||||
This will create a new directory named "InvokeAI". This example shows how
|
|
||||||
this would look using the `unzip` command-line tool, but you may use any
|
|
||||||
graphical or command-line Zip extractor:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> unzip invokeAI-windows.zip
|
|
||||||
Archive: C: \Linco\Downloads\invokeAI-linux.zip
|
|
||||||
creating: invokeAI\
|
|
||||||
inflating: invokeAI\install.bat
|
|
||||||
inflating: invokeAI\readme.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
3. If you are a macOS user, you may need to install the Xcode command line tools.
|
|
||||||
These are a set of tools that are needed to run certain applications in a Terminal,
|
|
||||||
including InvokeAI. This package is provided directly by Apple.
|
|
||||||
|
|
||||||
To install, open a terminal window and run `xcode-select --install`. You will get
|
|
||||||
a macOS system popup guiding you through the install. If you already have them
|
|
||||||
installed, you will instead see some output in the Terminal advising you that the
|
|
||||||
tools are already installed.
|
|
||||||
|
|
||||||
More information can be found here:
|
|
||||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
|
||||||
|
|
||||||
4. If you are using a desktop GUI, double-click the installer file. It will be
|
|
||||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|
|
||||||
Macintosh systems.
|
|
||||||
|
|
||||||
5. Alternatively, from the command line, run the shell script or .bat file:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> cd invokeAI
|
|
||||||
C:\Documents\Linco\invokeAI> install.bat
|
|
||||||
```
|
|
||||||
|
|
||||||
6. Sit back and let the install script work. It will install various binary
|
|
||||||
requirements including Conda, Git and Python, then download the current
|
|
||||||
InvokeAI code and install it along with its dependencies.
|
|
||||||
|
|
||||||
Be aware that some of the library download and install steps take a long time.
|
|
||||||
In particular, the `pytorch` package is quite large and often appears to get
|
|
||||||
"stuck" at 99.9%. Similarly, the `pip installing requirements` step may
|
|
||||||
appear to hang. Have patience and the installation step will eventually
|
|
||||||
resume. However, there are occasions when the library install does
|
|
||||||
legitimately get stuck. If you have been waiting for more than ten minutes
|
|
||||||
and nothing is happening, you can interrupt the script with ^C. You may restart
|
|
||||||
it and it will pick up where it left off.
|
|
||||||
|
|
||||||
7. After installation completes, the installer will launch a script called
|
|
||||||
`configure_invokeai.py`, which will guide you through the first-time process of
|
|
||||||
selecting one or more Stable Diffusion model weights files, downloading and
|
|
||||||
configuring them.
|
|
||||||
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
|
||||||
agreement that you must agree to in order to use. The script will list the
|
|
||||||
steps you need to take to create an account on the official site that hosts
|
|
||||||
the weights files, accept the agreement, and provide an access token that
|
|
||||||
allows InvokeAI to legally download and install the weights files.
|
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [Installing Models](../050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
8. The script will now exit and you'll be ready to generate some images. The
|
|
||||||
invokeAI directory will contain numerous files. Look for a shell script
|
|
||||||
named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
|
|
||||||
by double-clicking it or typing its name at the command-line:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
C:\Documents\Linco> cd invokeAI
|
|
||||||
C:\Documents\Linco\invokeAI> invoke.bat
|
|
||||||
```
|
|
||||||
|
|
||||||
The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
|
|
||||||
the command-line interface, or (2) the web GUI. If you start the latter, you can
|
|
||||||
load the user interface by pointing your browser at http://localhost:9090.
|
|
||||||
|
|
||||||
The `invoke` script also offers you a third option labeled "open the developer
|
|
||||||
console". If you choose this option, you will be dropped into a command-line
|
|
||||||
interface in which you can run python commands directly, access developer tools,
|
|
||||||
and launch InvokeAI with customized options. To do the latter, you would launch
|
|
||||||
the script `scripts/invoke.py` as shown in this example:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
python scripts/invoke.py --web --max_load_models=3 \
|
|
||||||
--model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
|
|
||||||
```
|
|
||||||
|
|
||||||
These options are described in detail in the
|
|
||||||
[Command-Line Interface](../../features/CLI.md) documentation.
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
_Package dependency conflicts_ If you have previously installed
|
|
||||||
InvokeAI or another Stable Diffusion package, the installer may
|
|
||||||
occasionally pick up outdated libraries and either the installer or
|
|
||||||
`invoke` will fail with complaints about library conflicts. There are
|
|
||||||
two steps you can take to clear this problem. Both of these are done
|
|
||||||
from within the "developer's console", which you can get to by
|
|
||||||
launching `invoke.sh` (or `invoke.bat`) and selecting launch option
|
|
||||||
#3:
|
|
||||||
|
|
||||||
1. Remove the previous `invokeai` environment completely. From within
|
|
||||||
the developer's console, give the command `conda env remove -n
|
|
||||||
invokeai`. This will delete previous files installed by `invoke`.
|
|
||||||
|
|
||||||
Then exit from the developer's console and launch the script
|
|
||||||
`update.sh` (or `update.bat`). This will download the most recent
|
|
||||||
InvokeAI (including bug fixes) and reinstall the environment.
|
|
||||||
You should then be able to run `invoke.sh`/`invoke.bat`.
|
|
||||||
|
|
||||||
2. If this doesn't work, you can try cleaning your system's conda
|
|
||||||
cache. This is slightly more extreme, but won't interfere with
|
|
||||||
any other python-based programs installed on your computer.
|
|
||||||
From the developer's console, run the command `conda clean -a`
|
|
||||||
and answer "yes" to all prompts.
|
|
||||||
|
|
||||||
After this is done, run `update.sh` and try again as before.
|
|
||||||
|
|
||||||
_"Corrupted configuration file."_ Everything seems to install ok, but
|
|
||||||
`invoke` complains of a corrupted configuration file and then calls
|
|
||||||
`configure_invokeai.py` to fix, but this doesn't fix the problem.
|
|
||||||
|
|
||||||
This issue is often caused by a misconfigured configuration directive
|
|
||||||
in the `.invokeai` initialization file that contains startup settings.
|
|
||||||
This can be corrected by fixing the offending line.
|
|
||||||
|
|
||||||
First find `.invokeai`. It is a small text file located in your home
|
|
||||||
directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
|
|
||||||
name*\.invokeai` on Windows systems. Open it with a text editor
|
|
||||||
(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
|
|
||||||
and look for the lines starting with `--root` and `--outdir`.
|
|
||||||
|
|
||||||
An example is here:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
--root="/home/lstein/invokeai"
|
|
||||||
--outdir="/home/lstein/invokeai/outputs"
|
|
||||||
```
|
|
||||||
|
|
||||||
There should not be whitespace before or after the directory paths,
|
|
||||||
and the paths should not end with slashes:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
--root="/home/lstein/invokeai " # wrong! no whitespace here
|
|
||||||
--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
|
|
||||||
```
|
|
||||||
|
|
||||||
Fix the problem with your text editor and save as a **plain text**
|
|
||||||
file. This should clear the issue.
|
|
||||||
|
|
||||||
_If none of these maneuvers fixes the problem_ then please report the
|
|
||||||
problem to the [InvokeAI
|
|
||||||
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
|
||||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.
|
|
||||||
|
|
||||||
## Updating to newer versions
|
|
||||||
|
|
||||||
This section describes how to update InvokeAI to new versions of the software.
|
|
||||||
|
|
||||||
### Updating the stable version
|
|
||||||
|
|
||||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
|
||||||
To update to the latest released version (recommended), run the `update.sh`
|
|
||||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
|
||||||
release and re-run the `configure_invokeai` script to download any updated models
|
|
||||||
files that may be needed. You can also use this to add additional models that
|
|
||||||
you did not select at installation time.
|
|
||||||
|
|
||||||
You can now close the developer console and run `invoke` as before. If you get
|
|
||||||
complaints about missing models, then you may need to do the additional step of
|
|
||||||
running `configure_invokeai.py`. This happens relatively infrequently. To do this,
|
|
||||||
simply open up the developer's console again and type
|
|
||||||
`python scripts/configure_invokeai.py`.
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
If you run into problems during or after installation, the InvokeAI team is
|
|
||||||
available to help you. Either create an
|
|
||||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
|
||||||
make a request for help on the "bugs-and-support" channel of our
|
|
||||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
|
||||||
organization, but typically somebody will be available to help you within 24
|
|
||||||
hours, and often much sooner.
|
|
||||||
@@ -5,29 +5,55 @@ title: Overview
|
|||||||
We offer several ways to install InvokeAI, each one suited to your
|
We offer several ways to install InvokeAI, each one suited to your
|
||||||
experience and preferences.
|
experience and preferences.
|
||||||
|
|
||||||
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
1. [InvokeAI installer](INSTALL_INVOKE.md)
|
||||||
|
|
||||||
This is a script that will install all of InvokeAI's essential
|
This is an installer script that installs InvokeAI and all the
|
||||||
third party libraries and InvokeAI itself. It includes access to a
|
third party libraries it depends on. When a new version of
|
||||||
"developer console" which will help us debug problems with you and
|
InvokeAI is released, you will download and reinstall the new
|
||||||
give you to access experimental features.
|
version.
|
||||||
|
|
||||||
2. [Manual Installation](020_INSTALL_MANUAL.md)
|
This installer is designed for people who want the system to "just
|
||||||
|
work", don't have an interest in tinkering with it, and do not
|
||||||
|
care about upgrading to unreleased experimental features.
|
||||||
|
|
||||||
|
*Note that this script has difficulty on some Macintosh machines
|
||||||
|
that have previously been used for Python development due to
|
||||||
|
conflicting development tools versions. Mac developers may wish
|
||||||
|
to try method (2) or one of the manual methods instead.
|
||||||
|
|
||||||
|
2. [Source code installer](INSTALL_SOURCE.md)
|
||||||
|
|
||||||
|
This is a script that will install InvokeAI and all its essential
|
||||||
|
third party libraries. In contrast to the previous installer, it
|
||||||
|
includes access to a "developer console" which will allow you to
|
||||||
|
access experimental features on the development branch.
|
||||||
|
|
||||||
|
This method is recommended for individuals who wish to stay
|
||||||
|
on the cutting edge of InvokeAI development and are not afraid
|
||||||
|
of occasional breakage.
|
||||||
|
|
||||||
|
3. [Manual Installation](INSTALL_MANUAL.md)
|
||||||
|
|
||||||
In this method you will manually run the commands needed to install
|
In this method you will manually run the commands needed to install
|
||||||
InvokeAI and its dependencies. We offer two recipes: one suited to
|
InvokeAI and its dependencies. We offer two recipes: one suited to
|
||||||
those who prefer the `conda` tool, and one suited to those who prefer
|
those who prefer the `conda` tool, and one suited to those who prefer
|
||||||
`pip` and Python virtual environments. In our hands the pip install
|
`pip` and Python virtual environments.
|
||||||
is faster and more reliable, but your mileage may vary.
|
|
||||||
|
|
||||||
This method is recommended for users who have previously used `conda`
|
This method is recommended for users who have previously used `conda`
|
||||||
or `pip` in the past, developers, and anyone who wishes to remain on
|
or `pip` in the past, developers, and anyone who wishes to remain on
|
||||||
the cutting edge of future InvokeAI development and is willing to put
|
the cutting edge of future InvokeAI development and is willing to put
|
||||||
up with occasional glitches and breakage.
|
up with occasional glitches and breakage.
|
||||||
|
|
||||||
3. [Docker Installation](040_INSTALL_DOCKER.md)
|
4. [Docker Installation](INSTALL_DOCKER.md)
|
||||||
|
|
||||||
We also offer a method for creating Docker containers containing
|
We also offer a method for creating Docker containers containing
|
||||||
InvokeAI and its dependencies. This method is recommended for
|
InvokeAI and its dependencies. This method is recommended for
|
||||||
individuals with experience with Docker containers and who understand
|
individuals with experience with Docker containers and who understand
|
||||||
the pluses and minuses of a container-based install.
|
the pluses and minuses of a container-based install.
|
||||||
|
|
||||||
|
5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
|
||||||
|
|
||||||
|
This method is suitable for running InvokeAI on a Google Colab
|
||||||
|
account. It is recommended for individuals who have previously
|
||||||
|
worked on the Colab and are comfortable with the Jupyter notebook
|
||||||
|
environment.
|
||||||
|
|||||||
@@ -69,7 +69,7 @@ title: Manual Installation, Linux
|
|||||||
machine-learning models:
|
machine-learning models:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
(invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
|
(invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
@@ -79,7 +79,7 @@ title: Manual Installation, Linux
|
|||||||
and obtaining an access token for downloading. It will then download and
|
and obtaining an access token for downloading. It will then download and
|
||||||
install the weights files for you.
|
install the weights files for you.
|
||||||
|
|
||||||
Please look [here](../INSTALL_MANUAL.md) for a manual process for doing
|
Please look [here](INSTALLING_MODELS.md) for a manual process for doing
|
||||||
the same thing.
|
the same thing.
|
||||||
|
|
||||||
7. Start generating images!
|
7. Start generating images!
|
||||||
@@ -112,7 +112,7 @@ title: Manual Installation, Linux
|
|||||||
To use an alternative model you may invoke the `!switch` command in
|
To use an alternative model you may invoke the `!switch` command in
|
||||||
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
||||||
either the CLI or the Web UI. See [Command Line
|
either the CLI or the Web UI. See [Command Line
|
||||||
Client](../../features/CLI.md#model-selection-and-importation). The
|
Client](../features/CLI.md#model-selection-and-importation). The
|
||||||
model names are defined in `configs/models.yaml`.
|
model names are defined in `configs/models.yaml`.
|
||||||
|
|
||||||
8. Subsequently, to relaunch the script, be sure to run "conda activate
|
8. Subsequently, to relaunch the script, be sure to run "conda activate
|
||||||
@@ -111,7 +111,7 @@ will do our best to help.
|
|||||||
|
|
||||||
!!! todo "Download the model weight files"
|
!!! todo "Download the model weight files"
|
||||||
|
|
||||||
The `configure_invokeai.py` script downloads and installs the model weight
|
The `preload_models.py` script downloads and installs the model weight
|
||||||
files for you. It will lead you through the process of getting a Hugging Face
|
files for you. It will lead you through the process of getting a Hugging Face
|
||||||
account, accepting the Stable Diffusion model weight license agreement, and
|
account, accepting the Stable Diffusion model weight license agreement, and
|
||||||
creating a download token:
|
creating a download token:
|
||||||
@@ -119,7 +119,7 @@ will do our best to help.
|
|||||||
```bash
|
```bash
|
||||||
# This will take some time, depending on the speed of your internet connection
|
# This will take some time, depending on the speed of your internet connection
|
||||||
# and will consume about 10GB of space
|
# and will consume about 10GB of space
|
||||||
python scripts/configure_invokeai.py
|
python scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! todo "Run InvokeAI!"
|
!!! todo "Run InvokeAI!"
|
||||||
@@ -150,7 +150,7 @@ will do our best to help.
|
|||||||
To use an alternative model you may invoke the `!switch` command in
|
To use an alternative model you may invoke the `!switch` command in
|
||||||
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
||||||
either the CLI or the Web UI. See [Command Line
|
either the CLI or the Web UI. See [Command Line
|
||||||
Client](../../features/CLI.md#model-selection-and-importation). The
|
Client](../features/CLI.md#model-selection-and-importation). The
|
||||||
model names are defined in `configs/models.yaml`.
|
model names are defined in `configs/models.yaml`.
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -220,8 +220,8 @@ There are several causes of these errors:
|
|||||||
with "(invokeai)" then you activated it. If it begins with "(base)" or
|
with "(invokeai)" then you activated it. If it begins with "(base)" or
|
||||||
something else you haven't.
|
something else you haven't.
|
||||||
|
|
||||||
2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
|
2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
|
||||||
instead of `python ./scripts/configure_invokeai.py` or
|
instead of `python ./scripts/preload_models.py` or
|
||||||
`python ./scripts/invoke.py`. The cause of this error is long so it's below.
|
`python ./scripts/invoke.py`. The cause of this error is long so it's below.
|
||||||
|
|
||||||
<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->
|
<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->
|
||||||
@@ -359,7 +359,7 @@ python ./scripts/txt2img.py \
|
|||||||
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'
|
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python scripts/configure_invokeai.py
|
python scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -7,7 +7,7 @@ title: Manual Installation, Windows
|
|||||||
## **Notebook install (semi-automated)**
|
## **Notebook install (semi-automated)**
|
||||||
|
|
||||||
We have a
|
We have a
|
||||||
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
|
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
|
||||||
with cell-by-cell installation steps. It will download the code in this repo as
|
with cell-by-cell installation steps. It will download the code in this repo as
|
||||||
one of the steps, so instead of cloning this repo, simply download the notebook
|
one of the steps, so instead of cloning this repo, simply download the notebook
|
||||||
from the link above and load it up in VSCode (with the appropriate extensions
|
from the link above and load it up in VSCode (with the appropriate extensions
|
||||||
@@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
|
|||||||
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:
|
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python scripts/configure_invokeai.py
|
python scripts/preload_models.py
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
@@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
|
|||||||
obtaining an access token for downloading. It will then download and install the
|
obtaining an access token for downloading. It will then download and install the
|
||||||
weights files for you.
|
weights files for you.
|
||||||
|
|
||||||
Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the
|
Please look [here](INSTALLING_MODELS.md) for a manual process for doing the
|
||||||
same thing.
|
same thing.
|
||||||
|
|
||||||
8. Start generating images!
|
8. Start generating images!
|
||||||
@@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
|
|||||||
To use an alternative model you may invoke the `!switch` command in
|
To use an alternative model you may invoke the `!switch` command in
|
||||||
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
|
||||||
either the CLI or the Web UI. See [Command Line
|
either the CLI or the Web UI. See [Command Line
|
||||||
Client](../../features/CLI.md#model-selection-and-importation). The
|
Client](../features/CLI.md#model-selection-and-importation). The
|
||||||
model names are defined in `configs/models.yaml`.
|
model names are defined in `configs/models.yaml`.
|
||||||
|
|
||||||
9. Subsequently, to relaunch the script, first activate the Anaconda
|
9. Subsequently, to relaunch the script, first activate the Anaconda
|
||||||
@@ -3,10 +3,10 @@ info:
|
|||||||
title: Stable Diffusion
|
title: Stable Diffusion
|
||||||
description: |-
|
description: |-
|
||||||
TODO: Description Here
|
TODO: Description Here
|
||||||
|
|
||||||
Some useful links:
|
Some useful links:
|
||||||
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
|
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
|
||||||
|
|
||||||
license:
|
license:
|
||||||
name: MIT License
|
name: MIT License
|
||||||
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
|
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
|
||||||
@@ -36,7 +36,7 @@ paths:
|
|||||||
description: successful operation
|
description: successful operation
|
||||||
content:
|
content:
|
||||||
image/png:
|
image/png:
|
||||||
schema:
|
schema:
|
||||||
type: string
|
type: string
|
||||||
format: binary
|
format: binary
|
||||||
'404':
|
'404':
|
||||||
@@ -66,7 +66,7 @@ paths:
|
|||||||
description: successful operation
|
description: successful operation
|
||||||
content:
|
content:
|
||||||
image/png:
|
image/png:
|
||||||
schema:
|
schema:
|
||||||
type: string
|
type: string
|
||||||
format: binary
|
format: binary
|
||||||
'404':
|
'404':
|
||||||
|
|||||||
@@ -13,20 +13,6 @@ We thank them for all of their time and hard work.
|
|||||||
|
|
||||||
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
||||||
|
|
||||||
## **Current core team**
|
|
||||||
|
|
||||||
* @lstein (Lincoln Stein) - Co-maintainer
|
|
||||||
* @blessedcoolant - Co-maintainer
|
|
||||||
* @hipsterusername (Kent Keirsey) - Product Manager
|
|
||||||
* @psychedelicious - Web Team Leader
|
|
||||||
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
|
||||||
* @damian0815 - Attention Systems and Gameplay Engineer
|
|
||||||
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
|
||||||
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
|
||||||
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
|
|
||||||
* @keturn - Lead for Diffusers port
|
|
||||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
|
||||||
|
|
||||||
## **Contributions by**
|
## **Contributions by**
|
||||||
|
|
||||||
- [Sean McLellan](https://github.com/Oceanswave)
|
- [Sean McLellan](https://github.com/Oceanswave)
|
||||||
@@ -75,7 +61,6 @@ We thank them for all of their time and hard work.
|
|||||||
- [Kent Keirsey](https://github.com/hipsterusername)
|
- [Kent Keirsey](https://github.com/hipsterusername)
|
||||||
- [psychedelicious](https://github.com/psychedelicious)
|
- [psychedelicious](https://github.com/psychedelicious)
|
||||||
- [damian0815](https://github.com/damian0815)
|
- [damian0815](https://github.com/damian0815)
|
||||||
- [Eugene Brodsky](https://github.com/ebr)
|
|
||||||
|
|
||||||
## **Original CompVis Authors**
|
## **Original CompVis Authors**
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ name: invokeai
|
|||||||
channels:
|
channels:
|
||||||
- pytorch
|
- pytorch
|
||||||
- conda-forge
|
- conda-forge
|
||||||
- defaults
|
|
||||||
dependencies:
|
dependencies:
|
||||||
- albumentations=0.4.3
|
- albumentations=0.4.3
|
||||||
- cudatoolkit
|
- cudatoolkit
|
||||||
@@ -30,9 +29,10 @@ dependencies:
|
|||||||
- torchvision
|
- torchvision
|
||||||
- transformers=4.21.3
|
- transformers=4.21.3
|
||||||
- pip:
|
- pip:
|
||||||
|
- dependency_injector==4.40.0
|
||||||
- getpass_asterisk
|
- getpass_asterisk
|
||||||
|
- gfpgan
|
||||||
- omegaconf==2.1.1
|
- omegaconf==2.1.1
|
||||||
- picklescan
|
|
||||||
- pyreadline3
|
- pyreadline3
|
||||||
- realesrgan
|
- realesrgan
|
||||||
- taming-transformers-rom1504
|
- taming-transformers-rom1504
|
||||||
@@ -40,6 +40,6 @@ dependencies:
|
|||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
- -e .
|
||||||
|
variables:
|
||||||
|
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
||||||
|
|||||||
@@ -4,12 +4,13 @@ channels:
|
|||||||
- conda-forge
|
- conda-forge
|
||||||
- defaults
|
- defaults
|
||||||
dependencies:
|
dependencies:
|
||||||
- python=3.9.*
|
- python>=3.9
|
||||||
- pip=22.2.2
|
- pip=22.2.2
|
||||||
- numpy=1.23.3
|
- numpy=1.23.3
|
||||||
- pip:
|
- pip:
|
||||||
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||||
- albumentations==0.4.3
|
- albumentations==0.4.3
|
||||||
|
- dependency_injector==4.40.0
|
||||||
- diffusers==0.6.0
|
- diffusers==0.6.0
|
||||||
- einops==0.3.0
|
- einops==0.3.0
|
||||||
- eventlet
|
- eventlet
|
||||||
@@ -17,12 +18,12 @@ dependencies:
|
|||||||
- flask_cors==3.0.10
|
- flask_cors==3.0.10
|
||||||
- flask_socketio==5.3.0
|
- flask_socketio==5.3.0
|
||||||
- getpass_asterisk
|
- getpass_asterisk
|
||||||
|
- gfpgan
|
||||||
- imageio-ffmpeg==0.4.2
|
- imageio-ffmpeg==0.4.2
|
||||||
- imageio==2.9.0
|
- imageio==2.9.0
|
||||||
- kornia==0.6.0
|
- kornia==0.6.0
|
||||||
- omegaconf==2.2.3
|
- omegaconf==2.2.3
|
||||||
- opencv-python==4.5.5.64
|
- opencv-python==4.5.5.64
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
- pillow==9.2.0
|
||||||
- pudb==2019.2
|
- pudb==2019.2
|
||||||
- pyreadline3
|
- pyreadline3
|
||||||
@@ -32,7 +33,6 @@ dependencies:
|
|||||||
- streamlit==1.12.0
|
- streamlit==1.12.0
|
||||||
- taming-transformers-rom1504
|
- taming-transformers-rom1504
|
||||||
- test-tube>=0.7.5
|
- test-tube>=0.7.5
|
||||||
- tqdm
|
|
||||||
- torch
|
- torch
|
||||||
- torch-fidelity==0.3.0
|
- torch-fidelity==0.3.0
|
||||||
- torchaudio
|
- torchaudio
|
||||||
@@ -40,8 +40,6 @@ dependencies:
|
|||||||
- torchvision
|
- torchvision
|
||||||
- transformers==4.21.3
|
- transformers==4.21.3
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
- -e .
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ channels:
|
|||||||
- conda-forge
|
- conda-forge
|
||||||
- defaults
|
- defaults
|
||||||
dependencies:
|
dependencies:
|
||||||
- python=3.9.*
|
- python>=3.9
|
||||||
- pip=22.2.2
|
- pip=22.2.2
|
||||||
- numpy=1.23.3
|
- numpy=1.23.3
|
||||||
- torchvision=0.13.1
|
- torchvision=0.13.1
|
||||||
@@ -13,6 +13,7 @@ dependencies:
|
|||||||
- cudatoolkit=11.6
|
- cudatoolkit=11.6
|
||||||
- pip:
|
- pip:
|
||||||
- albumentations==0.4.3
|
- albumentations==0.4.3
|
||||||
|
- dependency_injector==4.40.0
|
||||||
- diffusers==0.6.0
|
- diffusers==0.6.0
|
||||||
- einops==0.3.0
|
- einops==0.3.0
|
||||||
- eventlet
|
- eventlet
|
||||||
@@ -20,12 +21,12 @@ dependencies:
|
|||||||
- flask_cors==3.0.10
|
- flask_cors==3.0.10
|
||||||
- flask_socketio==5.3.0
|
- flask_socketio==5.3.0
|
||||||
- getpass_asterisk
|
- getpass_asterisk
|
||||||
|
- gfpgan
|
||||||
- imageio-ffmpeg==0.4.2
|
- imageio-ffmpeg==0.4.2
|
||||||
- imageio==2.9.0
|
- imageio==2.9.0
|
||||||
- kornia==0.6.0
|
- kornia==0.6.0
|
||||||
- omegaconf==2.2.3
|
- omegaconf==2.2.3
|
||||||
- opencv-python==4.5.5.64
|
- opencv-python==4.5.5.64
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
- pillow==9.2.0
|
||||||
- pudb==2019.2
|
- pudb==2019.2
|
||||||
- pyreadline3
|
- pyreadline3
|
||||||
@@ -39,8 +40,6 @@ dependencies:
|
|||||||
- torchmetrics==0.7.0
|
- torchmetrics==0.7.0
|
||||||
- transformers==4.21.3
|
- transformers==4.21.3
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
- -e .
|
||||||
|
|||||||
@@ -52,14 +52,13 @@ dependencies:
|
|||||||
- transformers=4.23
|
- transformers=4.23
|
||||||
- pip:
|
- pip:
|
||||||
- getpass_asterisk
|
- getpass_asterisk
|
||||||
- picklescan
|
|
||||||
- taming-transformers-rom1504
|
- taming-transformers-rom1504
|
||||||
- test-tube==0.7.5
|
- test-tube==0.7.5
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
|
||||||
|
- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
|
||||||
|
- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
- -e .
|
||||||
variables:
|
variables:
|
||||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ channels:
|
|||||||
- conda-forge
|
- conda-forge
|
||||||
- defaults
|
- defaults
|
||||||
dependencies:
|
dependencies:
|
||||||
- python=3.10.*
|
- python>=3.9
|
||||||
- pip=22.2.2
|
- pip=22.2.2
|
||||||
- numpy=1.23.3
|
- numpy=1.23.3
|
||||||
- torchvision=0.13.1
|
- torchvision=0.13.1
|
||||||
@@ -13,6 +13,8 @@ dependencies:
|
|||||||
- cudatoolkit=11.6
|
- cudatoolkit=11.6
|
||||||
- pip:
|
- pip:
|
||||||
- albumentations==0.4.3
|
- albumentations==0.4.3
|
||||||
|
- basicsr==1.4.1
|
||||||
|
- dependency_injector==4.40.0
|
||||||
- diffusers==0.6.0
|
- diffusers==0.6.0
|
||||||
- einops==0.3.0
|
- einops==0.3.0
|
||||||
- eventlet
|
- eventlet
|
||||||
@@ -20,12 +22,12 @@ dependencies:
|
|||||||
- flask_cors==3.0.10
|
- flask_cors==3.0.10
|
||||||
- flask_socketio==5.3.0
|
- flask_socketio==5.3.0
|
||||||
- getpass_asterisk
|
- getpass_asterisk
|
||||||
|
- gfpgan
|
||||||
- imageio-ffmpeg==0.4.2
|
- imageio-ffmpeg==0.4.2
|
||||||
- imageio==2.9.0
|
- imageio==2.9.0
|
||||||
- kornia==0.6.0
|
- kornia==0.6.0
|
||||||
- omegaconf==2.2.3
|
- omegaconf==2.2.3
|
||||||
- opencv-python==4.5.5.64
|
- opencv-python==4.5.5.64
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
- pillow==9.2.0
|
||||||
- pudb==2019.2
|
- pudb==2019.2
|
||||||
- pyreadline3
|
- pyreadline3
|
||||||
@@ -41,6 +43,4 @@ dependencies:
|
|||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
- -e .
|
||||||
|
|||||||
@@ -1,39 +1,36 @@
|
|||||||
# pip will resolve the version which matches torch
|
# pip will resolve the version which matches torch
|
||||||
albumentations
|
albumentations
|
||||||
diffusers==0.10.*
|
dependency_injector==4.40.0
|
||||||
|
diffusers
|
||||||
einops
|
einops
|
||||||
eventlet
|
eventlet
|
||||||
facexlib
|
|
||||||
flask==2.1.3
|
flask==2.1.3
|
||||||
flask_cors==3.0.10
|
flask_cors==3.0.10
|
||||||
flask_socketio==5.3.0
|
flask_socketio==5.3.0
|
||||||
flaskwebgui==1.0.3
|
flaskwebgui==0.3.7
|
||||||
getpass_asterisk
|
getpass_asterisk
|
||||||
gfpgan==1.3.8
|
gfpgan
|
||||||
huggingface-hub
|
huggingface-hub
|
||||||
imageio
|
imageio
|
||||||
imageio-ffmpeg
|
imageio-ffmpeg
|
||||||
kornia
|
kornia
|
||||||
numpy==1.23.*
|
numpy
|
||||||
omegaconf
|
omegaconf
|
||||||
opencv-python
|
opencv-python
|
||||||
picklescan
|
|
||||||
pillow
|
pillow
|
||||||
pip>=22
|
pip>=22
|
||||||
pudb
|
pudb
|
||||||
pyreadline3
|
pyreadline3
|
||||||
pytorch-lightning==1.7.7
|
pytorch-lightning==1.7.7
|
||||||
realesrgan
|
realesrgan
|
||||||
requests==2.25.1
|
|
||||||
scikit-image>=0.19
|
scikit-image>=0.19
|
||||||
send2trash
|
send2trash
|
||||||
streamlit
|
streamlit
|
||||||
taming-transformers-rom1504
|
taming-transformers-rom1504
|
||||||
test-tube>=0.7.5
|
test-tube
|
||||||
torch-fidelity
|
torch-fidelity
|
||||||
torchmetrics
|
torchmetrics
|
||||||
transformers==4.25.*
|
transformers==4.21.*
|
||||||
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
|
git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
|
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
||||||
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
|
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
-r environments-and-requirements/requirements-base.txt
|
-r environments-and-requirements/requirements-base.txt
|
||||||
# Get hardware-appropriate torch/torchvision
|
|
||||||
|
# Get hardware-appropriate torch/torchvision
|
||||||
--extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
|
--extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
|
||||||
torch
|
torch
|
||||||
torchvision
|
torchvision
|
||||||
|
|||||||
@@ -1,5 +1,2 @@
|
|||||||
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
|
|
||||||
-r environments-and-requirements/requirements-base.txt
|
-r environments-and-requirements/requirements-base.txt
|
||||||
torch
|
|
||||||
torchvision
|
|
||||||
-e .
|
-e .
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
-r environments-and-requirements/requirements-base.txt
|
-r environments-and-requirements/requirements-base.txt
|
||||||
grpcio<1.51.0
|
|
||||||
protobuf==3.19.6
|
protobuf==3.19.6
|
||||||
torch<1.13.0
|
torch<1.13.0
|
||||||
torchvision<0.14.0
|
torchvision<0.14.0
|
||||||
|
|||||||