Compare commits
61 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 90d37eac03 |  |
|  | 230de023ff |  |
|  | febf86dedf |  |
|  | 76ae17abac |  |
|  | 339ff4b464 |  |
|  | 00c0e487dd |  |
|  | 5c8dfa38be |  |
|  | acf85c66a5 |  |
|  | 3619918954 |  |
|  | 65b14683a8 |  |
|  | f4fc02a3da |  |
|  | c334170a93 |  |
|  | deab6c64fc |  |
|  | e1c9503951 |  |
|  | 9a21812bf5 |  |
|  | 347b5ce452 |  |
|  | b39029521b |  |
|  | 97b26f3de2 |  |
|  | e19a7a990d |  |
|  | 3e424e1046 |  |
|  | db20b4af9c |  |
|  | 44ff8f8531 |  |
|  | a8b794d7e0 |  |
|  | f868362ca8 |  |
|  | 8858f7e97c |  |
|  | 2db4969e18 |  |
|  | 2ecc1abf21 |  |
|  | 703bc9494a |  |
|  | e5ab07091d |  |
|  | 891678b656 |  |
|  | 39ea2a257c |  |
|  | 2d68eae16b |  |
|  | d65948c423 |  |
|  | 9910a0b004 |  |
|  | ff96358cb3 |  |
|  | edf471f655 |  |
|  | 5b02c8ca4a |  |
|  | e7688c53b8 |  |
|  | 87cada42db |  |
|  | 6fe67ee426 |  |
|  | 5fbc81885a |  |
|  | 25ba5451f2 |  |
|  | 138c9cf7a8 |  |
|  | 87981306a3 |  |
|  | f7893b3ea9 |  |
|  | 87395fe6fe |  |
|  | 15f876c66c |  |
|  | 522c35ac5b |  |
|  | bb2d6d640f |  |
|  | 2412d8dec1 |  |
|  | 2ab5a43663 |  |
|  | 0ec3d6c10a |  |
|  | d208e1b0f5 |  |
|  | 8a6ba6a212 |  |
|  | b793d69ff3 |  |
|  | 54f55471df |  |
|  | cec7fb7dc6 |  |
|  | b0b82efffe |  |
|  | e599604294 |  |
|  | 57a3ea9d7b |  |
|  | a3a50bb886 |  |
@@ -1,3 +0,0 @@
*
!environment*.yml
!docker-build
42  .github/workflows/build-container.yml  vendored
@@ -1,42 +0,0 @@
# Building the Image without pushing to confirm it is still buildable
# confirming functionality would unfortunately need way more resources
name: build container image
on:
  push:
    branches:
      - 'main'
      - 'development'
  pull_request:
    branches:
      - 'main'
      - 'development'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: prepare docker-tag
        env:
          repository: ${{ github.repository }}
        run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: buildx-${{ hashFiles('docker-build/Dockerfile') }}
      - name: Build container
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile
          platforms: linux/amd64
          push: false
          tags: ${{ env.dockertag }}:latest
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
25  .github/workflows/create-caches.yml  vendored
@@ -54,10 +54,27 @@ jobs:
          [[ -d models/ldm/stable-diffusion-v1 ]] \
            || mkdir -p models/ldm/stable-diffusion-v1
          [[ -r models/ldm/stable-diffusion-v1/model.ckpt ]] \
            || curl \
              -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
              -o models/ldm/stable-diffusion-v1/model.ckpt \
              -L https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
            || curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}

      - name: Use cached Conda Environment
        uses: actions/cache@v3
        env:
          cache-name: cache-conda-env-${{ env.CONDA_ENV_NAME }}
          conda-env-file: ${{ matrix.environment-file }}
        with:
          path: ${{ env.CONDA_ROOT }}/envs/${{ env.CONDA_ENV_NAME }}
          key: ${{ env.cache-name }}
          restore-keys: ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(env.conda-env-file) }}

      - name: Use cached Conda Packages
        uses: actions/cache@v3
        env:
          cache-name: cache-conda-env-${{ env.CONDA_ENV_NAME }}
          conda-env-file: ${{ matrix.environment-file }}
        with:
          path: ${{ env.CONDA_PKGS_DIR }}
          key: ${{ env.cache-name }}
          restore-keys: ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(env.conda-env-file) }}

      - name: Activate Conda Env
        uses: conda-incubator/setup-miniconda@v2
28  .github/workflows/mkdocs-flow.yml  vendored
@@ -1,28 +0,0 @@
name: Deploy
on:
  push:
    branches:
      - main
  # pull_request:
  #   branches:
  #     - main
jobs:
  build:
    name: Deploy docs to GitHub Pages
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build
        uses: Tiryoh/actions-mkdocs@v0
        with:
          mkdocs_version: 'latest' # option
          requirements: '/requirements-mkdocs.txt' # option
          configfile: '/mkdocs.yml' # option
      - name: Deploy
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./site
113  .github/workflows/test-invoke-conda.yml  vendored
@@ -1,63 +1,41 @@
name: Test invoke.py
name: Test Invoke with Conda
on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
      - 'development'

jobs:
  matrix:
  os_matrix:
    strategy:
      fail-fast: false
      matrix:
        stable-diffusion-model:
          - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
          - 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
        os:
          - ubuntu-latest
          - macOS-12
        os: [ubuntu-latest, macos-latest]
        include:
          - os: ubuntu-latest
            environment-file: environment.yml
            default-shell: bash -l {0}
          - os: macOS-12
          - os: macos-latest
            environment-file: environment-mac.yml
            default-shell: bash -l {0}
          - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/model.ckpt
            stable-diffusion-model-switch: stable-diffusion-1.4
          - stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-switch: stable-diffusion-1.5
    name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
    name: Test invoke.py on ${{ matrix.os }} with conda
    runs-on: ${{ matrix.os }}
    env:
      CONDA_ENV_NAME: invokeai
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: Use cached conda packages
        id: use-cached-conda-packages
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}

      - name: Activate Conda Env
        id: activate-conda-env
      - name: setup miniconda
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
          environment-file: ${{ matrix.environment-file }}
          auto-activate-base: false
          auto-update-conda: false
          miniconda-version: latest

      - name: set test prompt to main branch validation
@@ -70,40 +48,79 @@ jobs:

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
        run: echo "TEST_PROMPTS=tests/pr_prompt.txt" >> $GITHUB_ENV

      - name: Download ${{ matrix.stable-diffusion-model-switch }}
        id: download-stable-diffusion-model
      - name: set conda environment name
        run: echo "CONDA_ENV_NAME=invokeai" >> $GITHUB_ENV

      - name: Use Cached Stable Diffusion v1.4 Model
        id: cache-sd-v1-4
        uses: actions/cache@v3
        env:
          cache-name: cache-sd-v1-4
        with:
          path: models/ldm/stable-diffusion-v1/model.ckpt
          key: ${{ env.cache-name }}
          restore-keys: ${{ env.cache-name }}

      - name: Download Stable Diffusion v1.4 Model
        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
        run: |
          [[ -d models/ldm/stable-diffusion-v1 ]] \
            || mkdir -p models/ldm/stable-diffusion-v1
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o ${{ matrix.stable-diffusion-model-dl-path }} \
            -L ${{ matrix.stable-diffusion-model }}
          [[ -r models/ldm/stable-diffusion-v1/model.ckpt ]] \
            || curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}

      - name: Use cached Conda Environment
        uses: actions/cache@v3
        env:
          cache-name: cache-conda-env-${{ env.CONDA_ENV_NAME }}
          conda-env-file: ${{ matrix.environment-file }}
        with:
          path: ${{ env.CONDA }}/envs/${{ env.CONDA_ENV_NAME }}
          key: env-${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(env.conda-env-file) }}

      - name: Use cached Conda Packages
        uses: actions/cache@v3
        env:
          cache-name: cache-conda-pkgs-${{ env.CONDA_ENV_NAME }}
          conda-env-file: ${{ matrix.environment-file }}
        with:
          path: ${{ env.CONDA_PKGS_DIR }}
          key: pkgs-${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(env.conda-env-file) }}

      - name: Activate Conda Env
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
          environment-file: ${{ matrix.environment-file }}

      - name: Use Cached Huggingface and Torch models
        id: cache-hugginface-torch
        uses: actions/cache@v3
        env:
          cache-name: cache-hugginface-torch
        with:
          path: ~/.cache
          key: ${{ env.cache-name }}
          restore-keys: |
            ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}

      - name: run preload_models.py
        id: run-preload-models
        run: |
          python scripts/preload_models.py \
            --no-interactive
        run: python scripts/preload_models.py

      - name: Run the tests
        id: run-tests
        run: |
          time python scripts/invoke.py \
            --model ${{ matrix.stable-diffusion-model-switch }} \
            --from_file ${{ env.TEST_PROMPTS }}

      - name: export conda env
        id: export-conda-env
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}.yml

      - name: Archive results
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
          name: results_${{ matrix.os }}
          path: outputs/img-samples
16  .gitignore  vendored
@@ -1,11 +1,7 @@
# ignore default image save location and model symbolic link
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
ldm/invoke/restoration/codeformer/weights

# ignore user models config
configs/models.user.yaml
config/models.user.yml
**/restoration/codeformer/weights

# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh
@@ -184,7 +180,7 @@ src
**/__pycache__/
outputs

# Logs and associated folders
# Logs and associated folders
# created from generated embeddings.
logs
testtube
@@ -199,13 +195,7 @@ checkpoints
.scratch/
.vscode/
gfpgan/
models/ldm/stable-diffusion-v1/*.sha256
models/ldm/stable-diffusion-v1/model.sha256

# GFPGAN model files
gfpgan/

# config file (will be created by installer)
configs/models.yaml

# weights (will be created by installer)
models/ldm/stable-diffusion-v1/*.ckpt
13  LICENSE
@@ -1,17 +1,6 @@
MIT License

Copyright (c) 2022 Lincoln Stein and InvokeAI Organization

This software is derived from a fork of the source code available from
https://github.com/pesser/stable-diffusion and
https://github.com/CompViz/stable-diffusion. They carry the following
copyrights:

Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors

Please see individual source code files for copyright and authorship
attributions.
Copyright (c) 2022 InvokeAI Team

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
69  README.md
@@ -2,7 +2,7 @@

# InvokeAI: A Stable Diffusion Toolkit

_Formerly known as lstein/stable-diffusion_
_Formally known as lstein/stable-diffusion_

![project logo](docs/assets/logo.png)

@@ -68,11 +68,11 @@ requests. Be sure to use the provided templates. They will help aid diagnose iss
This fork is supported across multiple platforms. You can find individual installation instructions
below.

- #### [Linux](docs/installation/INSTALL_LINUX.md)
- #### [Linux](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_LINUX/)

- #### [Windows](docs/installation/INSTALL_WINDOWS.md)
- #### [Windows](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_WINDOWS/)

- #### [Macintosh](docs/installation/INSTALL_MAC.md)
- #### [Macintosh](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_MAC/)

### Hardware Requirements

@@ -103,34 +103,33 @@ errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag:

```bash
(ldm) ~/stable-diffusion$ python scripts/invoke.py --precision=float32
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```

### Features

#### Major Features

- [Web Server](docs/features/WEB.md)
- [Interactive Command Line Interface](docs/features/CLI.md)
- [Image To Image](docs/features/IMG2IMG.md)
- [Inpainting Support](docs/features/INPAINTING.md)
- [Outpainting Support](docs/features/OUTPAINTING.md)
- [Upscaling, face-restoration and outpainting](docs/features/POSTPROCESS.md)
- [Seamless Tiling](docs/features/OTHER.md#seamless-tiling)
- [Google Colab](docs/features/OTHER.md#google-colab)
- [Reading Prompts From File](docs/features/PROMPTS.md#reading-prompts-from-a-file)
- [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds)
- [Prompt Blending](docs/features/PROMPTS.md#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](docs/features/PROMPTS.md#negative-and-unconditioned-prompts)
- [Variations](docs/features/VARIATIONS.md)
- [Personalizing Text-to-Image Generation](docs/features/TEXTUAL_INVERSION.md)
- [Simplified API for text to image generation](docs/features/OTHER.md#simplified-api)
- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/)
- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/)
- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/)
- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/)
- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file)
- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts)
- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/)
- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api)

#### Other Features

- [Creating Transparent Regions for Inpainting](docs/features/INPAINTING.md#creating-transparent-regions-for-inpainting)
- [Preload Models](docs/features/OTHER.md#preload-models)
- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab)
- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling)
- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds)
- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models)

### Latest Changes

@@ -144,33 +143,33 @@ you can try starting `invoke.py` with the `--precision=float32` flag:
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
  for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
- Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
  and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
  during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
  and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
- Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
  New commands added:
  * List command-line history with `!history`
  * Search command-line history with `!search`
  * Clear history with `!clear`
  - List command-line history with `!history`
  - Search command-line history with `!search`
  - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](docs/features/CHANGELOG.md)**.
For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.

### Troubleshooting

Please check out our **[Q&A](docs/help/TROUBLESHOOT.md)** to get solutions for common installation
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.

# Contributing

@@ -188,7 +187,7 @@ changes.
### Contributors

This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](docs/other/CONTRIBUTORS.md). We thank them for
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
their time, hard work and effort.

### Support

@@ -202,4 +201,4 @@ Original portions of the software are Copyright (c) 2020
### Further Reading

Please see the original README for more information on this software and underlying algorithm,
located in the file [README-CompViz.md](docs/other/README-CompViz.md).
located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/).
822  backend/server.py  Normal file
@@ -0,0 +1,822 @@
import mimetypes
import transformers
import json
import os
import traceback
import eventlet
import glob
import shlex
import math
import shutil
import sys

sys.path.append(".")

from argparse import ArgumentTypeError
from modules.create_cmd_parser import create_cmd_parser

parser = create_cmd_parser()
opt = parser.parse_args()


from flask_socketio import SocketIO
from flask import Flask, send_from_directory, url_for, jsonify
from pathlib import Path
from PIL import Image
from pytorch_lightning import logging
from threading import Event
from uuid import uuid4
from send2trash import send2trash


from ldm.generate import Generate
from ldm.invoke.restoration import Restoration
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.args import APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import split_weighted_subprompts

from modules.parameters import parameters_to_command


"""
USER CONFIG
"""
if opt.cors and "*" in opt.cors:
    raise ArgumentTypeError('"*" is not an allowed CORS origin')


output_dir = "outputs/"  # Base output directory for images
host = opt.host  # Web & socket.io host
port = opt.port  # Web & socket.io port
verbose = opt.verbose  # enables copious socket.io logging
precision = opt.precision
free_gpu_mem = opt.free_gpu_mem
embedding_path = opt.embedding_path
additional_allowed_origins = (
    opt.cors if opt.cors else []
)  # additional CORS allowed origins
model = "stable-diffusion-1.4"

"""
END USER CONFIG
"""


print("* Initializing, be patient...\n")


"""
SERVER SETUP
"""


# fix missing mimetypes on windows due to registry wonkiness
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")

app = Flask(__name__, static_url_path="", static_folder="../frontend/dist/")


app.config["OUTPUTS_FOLDER"] = "../outputs"


@app.route("/outputs/<path:filename>")
def outputs(filename):
    return send_from_directory(app.config["OUTPUTS_FOLDER"], filename)


@app.route("/", defaults={"path": ""})
def serve(path):
    return send_from_directory(app.static_folder, "index.html")


logger = True if verbose else False
engineio_logger = True if verbose else False

# default 1,000,000, needs to be higher for socketio to accept larger images
max_http_buffer_size = 10000000

cors_allowed_origins = [f"http://{host}:{port}"] + additional_allowed_origins

socketio = SocketIO(
    app,
    logger=logger,
    engineio_logger=engineio_logger,
    max_http_buffer_size=max_http_buffer_size,
    cors_allowed_origins=cors_allowed_origins,
    ping_interval=(50, 50),
    ping_timeout=60,
)


"""
END SERVER SETUP
"""


"""
APP SETUP
"""


class CanceledException(Exception):
    pass


try:
    gfpgan, codeformer, esrgan = None, None, None
    from ldm.invoke.restoration.base import Restoration

    restoration = Restoration()
    gfpgan, codeformer = restoration.load_face_restore_models()
    esrgan = restoration.load_esrgan()

    # coreformer.process(self, image, strength, device, seed=None, fidelity=0.75)

except (ModuleNotFoundError, ImportError):
    print(traceback.format_exc(), file=sys.stderr)
    print(">> You may need to install the ESRGAN and/or GFPGAN modules")

canceled = Event()

# reduce logging outputs to error
transformers.logging.set_verbosity_error()
logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)

# Initialize and load model
generate = Generate(
    model,
    precision=precision,
    embedding_path=embedding_path,
)
generate.free_gpu_mem = free_gpu_mem
generate.load_model()


# location for "finished" images
result_path = os.path.join(output_dir, "img-samples/")

# temporary path for intermediates
intermediate_path = os.path.join(result_path, "intermediates/")

# path for user-uploaded init images and masks
init_image_path = os.path.join(result_path, "init-images/")
mask_image_path = os.path.join(result_path, "mask-images/")

# txt log
log_path = os.path.join(result_path, "invoke_log.txt")

# make all output paths
[
    os.makedirs(path, exist_ok=True)
    for path in [result_path, intermediate_path, init_image_path, mask_image_path]
]


"""
END APP SETUP
"""


"""
SOCKET.IO LISTENERS
"""


@socketio.on("requestSystemConfig")
def handle_request_capabilities():
    print(f">> System config requested")
    config = get_system_config()
    socketio.emit("systemConfig", config)


@socketio.on("requestImages")
def handle_request_images(page=1, offset=0, last_mtime=None):
    chunk_size = 50

    if last_mtime:
        print(f">> Latest images requested")
    else:
        print(
            f">> Page {page} of images requested (page size {chunk_size} offset {offset})"
        )

    paths = glob.glob(os.path.join(result_path, "*.png"))
    sorted_paths = sorted(paths, key=lambda x: os.path.getmtime(x), reverse=True)

    if last_mtime:
        image_paths = filter(lambda x: os.path.getmtime(x) > last_mtime, sorted_paths)
    else:
        image_paths = sorted_paths[
            slice(chunk_size * (page - 1) + offset, chunk_size * page + offset)
        ]
        page = page + 1

    image_array = []

    for path in image_paths:
        metadata = retrieve_metadata(path)
        image_array.append(
            {
                "url": path,
                "mtime": os.path.getmtime(path),
                "metadata": metadata["sd-metadata"],
            }
        )

    socketio.emit(
        "galleryImages",
        {
            "images": image_array,
            "nextPage": page,
            "offset": offset,
            "onlyNewImages": True if last_mtime else False,
        },
    )


@socketio.on("generateImage")
def handle_generate_image_event(
    generation_parameters, esrgan_parameters, gfpgan_parameters
):
    print(
        f">> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}"
    )
    generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)


@socketio.on("runESRGAN")
def handle_run_esrgan_event(original_image, esrgan_parameters):
    print(
        f'>> ESRGAN upscale requested for "{original_image["url"]}": {esrgan_parameters}'
    )
    progress = {
        "currentStep": 1,
        "totalSteps": 1,
        "currentIteration": 1,
        "totalIterations": 1,
        "currentStatus": "Preparing",
        "isProcessing": True,
        "currentStatusHasSteps": False,
    }

    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    image = Image.open(original_image["url"])

    seed = (
        original_image["metadata"]["seed"]
        if "seed" in original_image["metadata"]
        else "unknown_seed"
    )

    progress["currentStatus"] = "Upscaling"
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    image = esrgan.process(
        image=image,
        upsampler_scale=esrgan_parameters["upscale"][0],
        strength=esrgan_parameters["upscale"][1],
        seed=seed,
    )

    progress["currentStatus"] = "Saving image"
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    esrgan_parameters["seed"] = seed
    metadata = parameters_to_post_processed_image_metadata(
        parameters=esrgan_parameters,
        original_image_path=original_image["url"],
        type="esrgan",
    )
    command = parameters_to_command(esrgan_parameters)

    path = save_image(image, command, metadata, result_path, postprocessing="esrgan")

    write_log_message(f'[Upscaled] "{original_image["url"]}" > "{path}": {command}')

    progress["currentStatus"] = "Finished"
    progress["currentStep"] = 0
    progress["totalSteps"] = 0
    progress["currentIteration"] = 0
    progress["totalIterations"] = 0
    progress["isProcessing"] = False
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    socketio.emit(
        "esrganResult",
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": metadata,
        },
    )


@socketio.on("runGFPGAN")
def handle_run_gfpgan_event(original_image, gfpgan_parameters):
    print(
        f'>> GFPGAN face fix requested for "{original_image["url"]}": {gfpgan_parameters}'
    )
    progress = {
        "currentStep": 1,
        "totalSteps": 1,
        "currentIteration": 1,
        "totalIterations": 1,
        "currentStatus": "Preparing",
        "isProcessing": True,
        "currentStatusHasSteps": False,
    }

    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    image = Image.open(original_image["url"])

    seed = (
        original_image["metadata"]["seed"]
        if "seed" in original_image["metadata"]
        else "unknown_seed"
    )

    progress["currentStatus"] = "Fixing faces"
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    image = gfpgan.process(
        image=image, strength=gfpgan_parameters["facetool_strength"], seed=seed
    )

    progress["currentStatus"] = "Saving image"
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    gfpgan_parameters["seed"] = seed
    metadata = parameters_to_post_processed_image_metadata(
        parameters=gfpgan_parameters,
        original_image_path=original_image["url"],
        type="gfpgan",
    )
    command = parameters_to_command(gfpgan_parameters)

    path = save_image(image, command, metadata, result_path, postprocessing="gfpgan")

    write_log_message(f'[Fixed faces] "{original_image["url"]}" > "{path}": {command}')

    progress["currentStatus"] = "Finished"
    progress["currentStep"] = 0
    progress["totalSteps"] = 0
    progress["currentIteration"] = 0
    progress["totalIterations"] = 0
    progress["isProcessing"] = False
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    socketio.emit(
        "gfpganResult",
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": metadata,
        },
    )


@socketio.on("cancel")
def handle_cancel():
    print(f">> Cancel processing requested")
    canceled.set()
    socketio.emit("processingCanceled")


# TODO: I think this needs a safety mechanism.
@socketio.on("deleteImage")
def handle_delete_image(path, uuid):
    print(f'>> Delete requested "{path}"')
    send2trash(path)
    socketio.emit("imageDeleted", {"url": path, "uuid": uuid})


# TODO: I think this needs a safety mechanism.
@socketio.on("uploadInitialImage")
def handle_upload_initial_image(bytes, name):
    print(f'>> Init image upload requested "{name}"')
    uuid = uuid4().hex
    split = os.path.splitext(name)
    name = f"{split[0]}.{uuid}{split[1]}"
    file_path = os.path.join(init_image_path, name)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    newFile = open(file_path, "wb")
    newFile.write(bytes)
    socketio.emit("initialImageUploaded", {"url": file_path, "uuid": ""})


# TODO: I think this needs a safety mechanism.
@socketio.on("uploadMaskImage")
def handle_upload_mask_image(bytes, name):
    print(f'>> Mask image upload requested "{name}"')
    uuid = uuid4().hex
    split = os.path.splitext(name)
    name = f"{split[0]}.{uuid}{split[1]}"
    file_path = os.path.join(mask_image_path, name)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    newFile = open(file_path, "wb")
    newFile.write(bytes)
    socketio.emit("maskImageUploaded", {"url": file_path, "uuid": ""})


"""
END SOCKET.IO LISTENERS
"""


"""
ADDITIONAL FUNCTIONS
"""


def get_system_config():
    return {
        "model": "stable diffusion",
        "model_id": model,
        "model_hash": generate.model_hash,
        "app_id": APP_ID,
        "app_version": APP_VERSION,
    }


def parameters_to_post_processed_image_metadata(parameters, original_image_path, type):
    # top-level metadata minus `image` or `images`
    metadata = get_system_config()

    orig_hash = calculate_init_img_hash(original_image_path)

    image = {"orig_path": original_image_path, "orig_hash": orig_hash}

    if type == "esrgan":
        image["type"] = "esrgan"
        image["scale"] = parameters["upscale"][0]
        image["strength"] = parameters["upscale"][1]
    elif type == "gfpgan":
        image["type"] = "gfpgan"
        image["strength"] = parameters["facetool_strength"]
    else:
        raise TypeError(f"Invalid type: {type}")

    metadata["image"] = image
    return metadata


def parameters_to_generated_image_metadata(parameters):
    # top-level metadata minus `image` or `images`

    metadata = get_system_config()
    # remove any image keys not mentioned in RFC #266
    rfc266_img_fields = [
        "type",
        "postprocessing",
        "sampler",
        "prompt",
        "seed",
        "variations",
        "steps",
        "cfg_scale",
        "threshold",
        "perlin",
        "step_number",
        "width",
        "height",
        "extra",
        "seamless",
        "hires_fix",
    ]

    rfc_dict = {}

    for item in parameters.items():
        key, value = item
        if key in rfc266_img_fields:
            rfc_dict[key] = value

    postprocessing = []

    # 'postprocessing' is either null or an
    if "facetool_strength" in parameters:
        postprocessing.append(
            {"type": "gfpgan", "strength": float(parameters["facetool_strength"])}
        )

    if "upscale" in parameters:
        postprocessing.append(
            {
                "type": "esrgan",
                "scale": int(parameters["upscale"][0]),
                "strength": float(parameters["upscale"][1]),
            }
        )

    rfc_dict["postprocessing"] = postprocessing if len(postprocessing) > 0 else None

    # semantic drift
    rfc_dict["sampler"] = parameters["sampler_name"]

    # display weighted subprompts (liable to change)
    subprompts = split_weighted_subprompts(parameters["prompt"])
    subprompts = [{"prompt": x[0], "weight": x[1]} for x in subprompts]
    rfc_dict["prompt"] = subprompts

    # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
    variations = []

    if "with_variations" in parameters:
        variations = [
            {"seed": x[0], "weight": x[1]} for x in parameters["with_variations"]
        ]

    rfc_dict["variations"] = variations

    if "init_img" in parameters:
        rfc_dict["type"] = "img2img"
        rfc_dict["strength"] = parameters["strength"]
        rfc_dict["fit"] = parameters["fit"]  # TODO: Noncompliant
        rfc_dict["orig_hash"] = calculate_init_img_hash(parameters["init_img"])
        rfc_dict["init_image_path"] = parameters["init_img"]  # TODO: Noncompliant
        rfc_dict["sampler"] = "ddim"  # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS
        if "init_mask" in parameters:
            rfc_dict["mask_hash"] = calculate_init_img_hash(
                parameters["init_mask"]
            )  # TODO: Noncompliant
            rfc_dict["mask_image_path"] = parameters["init_mask"]  # TODO: Noncompliant
    else:
        rfc_dict["type"] = "txt2img"

    metadata["image"] = rfc_dict

    return metadata


def make_unique_init_image_filename(name):
    uuid = uuid4().hex
    split = os.path.splitext(name)
    name = f"{split[0]}.{uuid}{split[1]}"
    return name


def write_log_message(message, log_path=log_path):
    """Logs the filename and parameters used to generate or process that image to log file"""
    message = f"{message}\n"
    with open(log_path, "a", encoding="utf-8") as file:
        file.writelines(message)


def save_image(
    image, command, metadata, output_dir, step_index=None, postprocessing=False
):
    pngwriter = PngWriter(output_dir)
    prefix = pngwriter.unique_prefix()

    seed = "unknown_seed"

    if "image" in metadata:
        if "seed" in metadata["image"]:
            seed = metadata["image"]["seed"]

    filename = f"{prefix}.{seed}"

    if step_index:
        filename += f".{step_index}"
    if postprocessing:
        filename += f".postprocessed"

    filename += ".png"

    path = pngwriter.save_image_and_prompt_to_png(
        image=image, dream_prompt=command, metadata=metadata, name=filename
    )

    return path


def calculate_real_steps(steps, strength, has_init_image):
    return math.floor(strength * steps) if has_init_image else steps


def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters):
    canceled.clear()

    step_index = 1
    prior_variations = (
        generation_parameters["with_variations"]
        if "with_variations" in generation_parameters
        else []
    )
    """
    If a result image is used as an init image, and then deleted, we will want to be
    able to use it as an init image in the future. Need to copy it.

    If the init/mask image doesn't exist in the init_image_path/mask_image_path,
    make a unique filename for it and copy it there.
    """
    if "init_img" in generation_parameters:
        filename = os.path.basename(generation_parameters["init_img"])
        if not os.path.exists(os.path.join(init_image_path, filename)):
            unique_filename = make_unique_init_image_filename(filename)
            new_path = os.path.join(init_image_path, unique_filename)
            shutil.copy(generation_parameters["init_img"], new_path)
            generation_parameters["init_img"] = new_path
    if "init_mask" in generation_parameters:
        filename = os.path.basename(generation_parameters["init_mask"])
        if not os.path.exists(os.path.join(mask_image_path, filename)):
            unique_filename = make_unique_init_image_filename(filename)
            new_path = os.path.join(init_image_path, unique_filename)
            shutil.copy(generation_parameters["init_img"], new_path)
            generation_parameters["init_mask"] = new_path

    totalSteps = calculate_real_steps(
        steps=generation_parameters["steps"],
        strength=generation_parameters["strength"]
        if "strength" in generation_parameters
        else None,
        has_init_image="init_img" in generation_parameters,
    )

    progress = {
        "currentStep": 1,
        "totalSteps": totalSteps,
        "currentIteration": 1,
        "totalIterations": generation_parameters["iterations"],
        "currentStatus": "Preparing",
        "isProcessing": True,
        "currentStatusHasSteps": False,
    }

    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)

    def image_progress(sample, step):
        if canceled.is_set():
            raise CanceledException

        nonlocal step_index
        nonlocal generation_parameters
        nonlocal progress

        progress["currentStep"] = step + 1
        progress["currentStatus"] = "Generating"
        progress["currentStatusHasSteps"] = True

        if (
            generation_parameters["progress_images"]
            and step % 5 == 0
            and step < generation_parameters["steps"] - 1
        ):
            image = generate.sample_to_image(sample)

            metadata = parameters_to_generated_image_metadata(generation_parameters)
            command = parameters_to_command(generation_parameters)
            path = save_image(image, command, metadata, intermediate_path, step_index=step_index, postprocessing=False)

            step_index += 1
            socketio.emit(
                "intermediateResult",
                {
                    "url": os.path.relpath(path),
                    "mtime": os.path.getmtime(path),
                    "metadata": metadata,
                },
            )
        socketio.emit("progressUpdate", progress)
        eventlet.sleep(0)

    def image_done(image, seed, first_seed):
        nonlocal generation_parameters
        nonlocal esrgan_parameters
        nonlocal gfpgan_parameters
        nonlocal progress

        step_index = 1
        nonlocal prior_variations

        progress["currentStatus"] = "Generation complete"
        socketio.emit("progressUpdate", progress)
        eventlet.sleep(0)

        all_parameters = generation_parameters
        postprocessing = False

        if (
            "variation_amount" in all_parameters
            and all_parameters["variation_amount"] > 0
        ):
            first_seed = first_seed or seed
            this_variation = [[seed, all_parameters["variation_amount"]]]
            all_parameters["with_variations"] = prior_variations + this_variation
            all_parameters["seed"] = first_seed
        elif "with_variations" in all_parameters:
            all_parameters["seed"] = first_seed
        else:
            all_parameters["seed"] = seed

        if esrgan_parameters:
            progress["currentStatus"] = "Upscaling"
            progress["currentStatusHasSteps"] = False
            socketio.emit("progressUpdate", progress)
            eventlet.sleep(0)

            image = esrgan.process(
                image=image,
                upsampler_scale=esrgan_parameters["level"],
                strength=esrgan_parameters["strength"],
                seed=seed,
            )

            postprocessing = True
            all_parameters["upscale"] = [
                esrgan_parameters["level"],
                esrgan_parameters["strength"],
            ]

        if gfpgan_parameters:
            progress["currentStatus"] = "Fixing faces"
            progress["currentStatusHasSteps"] = False
            socketio.emit("progressUpdate", progress)
            eventlet.sleep(0)

            image = gfpgan.process(
                image=image, strength=gfpgan_parameters["strength"], seed=seed
            )
            postprocessing = True
            all_parameters["facetool_strength"] = gfpgan_parameters["strength"]

        progress["currentStatus"] = "Saving image"
        socketio.emit("progressUpdate", progress)
        eventlet.sleep(0)

        metadata = parameters_to_generated_image_metadata(all_parameters)
        command = parameters_to_command(all_parameters)

        path = save_image(
            image, command, metadata, result_path, postprocessing=postprocessing
        )

        print(f'>> Image generated: "{path}"')
        write_log_message(f'[Generated] "{path}": {command}')

        if progress["totalIterations"] > progress["currentIteration"]:
            progress["currentStep"] = 1
            progress["currentIteration"] += 1
            progress["currentStatus"] = "Iteration finished"
            progress["currentStatusHasSteps"] = False
        else:
            progress["currentStep"] = 0
            progress["totalSteps"] = 0
            progress["currentIteration"] = 0
            progress["totalIterations"] = 0
            progress["currentStatus"] = "Finished"
            progress["isProcessing"] = False

        socketio.emit("progressUpdate", progress)
        eventlet.sleep(0)

        socketio.emit(
            "generationResult",
            {
                "url": os.path.relpath(path),
                "mtime": os.path.getmtime(path),
                "metadata": metadata,
            },
        )
        eventlet.sleep(0)

    try:
        generate.prompt2image(
            **generation_parameters,
            step_callback=image_progress,
            image_callback=image_done,
        )

    except KeyboardInterrupt:
        raise
    except CanceledException:
        pass
    except Exception as e:
        socketio.emit("error", {"message": (str(e))})
        print("\n")
        traceback.print_exc()
        print("\n")


"""
END ADDITIONAL FUNCTIONS
"""


if __name__ == "__main__":
    print(f">> Starting server at http://{host}:{port}")
    socketio.run(app, host=host, port=port)
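The new `backend/server.py` above exposes its whole API as Socket.IO events (`requestSystemConfig`, `requestImages`, `generateImage`, and so on). As a rough sketch of how a client would talk to it, the following uses the `python-socketio` package to request the system config and print the reply; the host and port are assumptions (the server reads them from `create_cmd_parser`, so adjust to your `--host`/`--port`).

```python
# Hypothetical client sketch for the Socket.IO API above.
# Assumes the server is reachable at localhost:9090 (adjust as needed).
import socketio

sio = socketio.Client()

@sio.on("systemConfig")
def on_system_config(config):
    # Payload shape matches get_system_config() in backend/server.py
    print("model:", config["model_id"], "app version:", config["app_version"])
    sio.disconnect()

sio.connect("http://localhost:9090")
sio.emit("requestSystemConfig")  # handled by handle_request_capabilities()
sio.wait()
```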
54  configs/autoencoder/autoencoder_kl_16x16x16.yaml  Normal file
@@ -0,0 +1,54 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 16
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 16
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
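These config files all follow the latent-diffusion convention in which every `target:`/`params:` pair names a Python class and its constructor arguments. A minimal sketch of that instantiation pattern (modeled on `instantiate_from_config` in `ldm.util`; the helper below is illustrative, not the exact upstream code):

```python
# Minimal sketch of the target/params pattern used by these YAML configs.
import importlib

def instantiate_from_config(config: dict):
    # "ldm.models.autoencoder.AutoencoderKL" -> module path + class name
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    # e.g. AutoencoderKL(monitor="val/rec_loss", embed_dim=16, ...)
    return cls(**config.get("params", {}))
```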
53  configs/autoencoder/autoencoder_kl_32x32x4.yaml  Normal file
@@ -0,0 +1,53 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 4
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
54  configs/autoencoder/autoencoder_kl_64x64x3.yaml  Normal file
@@ -0,0 +1,54 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 3
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,2,4 ]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [ ]
      dropout: 0.0


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
53  configs/autoencoder/autoencoder_kl_8x8x64.yaml  Normal file
@@ -0,0 +1,53 @@
model:
  base_learning_rate: 4.5e-6
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: "val/rec_loss"
    embed_dim: 64
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 0.000001
        disc_weight: 0.5

    ddconfig:
      double_z: True
      z_channels: 64
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult: [ 1,1,2,2,4,4]  # num_down = len(ch_mult)-1
      num_res_blocks: 2
      attn_resolutions: [16,8]
      dropout: 0.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 12
    wrap: True
    train:
      target: ldm.data.imagenet.ImageNetSRTrain
      params:
        size: 256
        degradation: pil_nearest
    validation:
      target: ldm.data.imagenet.ImageNetSRValidation
      params:
        size: 256
        degradation: pil_nearest

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 1000
        max_images: 8
        increase_log_steps: True

  trainer:
    benchmark: True
    accumulate_grad_batches: 2
86  configs/latent-diffusion/celebahq-ldm-vq-4.yaml  Normal file
@@ -0,0 +1,86 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 64 for f4
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: models/first_stage_models/vq-f4/model.ckpt
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.CelebAHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.CelebAHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
98  configs/latent-diffusion/cin-ldm-vq-f8.yaml  Normal file
@@ -0,0 +1,98 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 256
        attention_resolutions:
        # note: this isn't actually the resolution but
        # the downsampling factor, i.e. this corresponds to
        # attention on spatial resolution 8,16,32, as the
        # spatial resolution of the latents is 32 for f8
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 4
        n_embed: 16384
        ckpt_path: configs/first_stage_models/vq-f8/model.yaml
        ddconfig:
          double_z: false
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions:
          - 32
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        embed_dim: 512
        key: class_label
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 64
    num_workers: 12
    wrap: false
    train:
      target: ldm.data.imagenet.ImageNetTrain
      params:
        config:
          size: 256
    validation:
      target: ldm.data.imagenet.ImageNetValidation
      params:
        config:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
68 configs/latent-diffusion/cin256-v2.yaml (Normal file)
@@ -0,0 +1,68 @@
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss
    use_ema: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 192
        attention_resolutions:
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 5
        num_heads: 1
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512

    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        n_classes: 1001
        embed_dim: 512
        key: class_label
85 configs/latent-diffusion/ffhq-ldm-vq-4.yaml (Normal file)
@@ -0,0 +1,85 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 64 for f4
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 42
    num_workers: 5
    wrap: false
    train:
      target: taming.data.faceshq.FFHQTrain
      params:
        size: 256
    validation:
      target: taming.data.faceshq.FFHQValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
85 configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml (Normal file)
@@ -0,0 +1,85 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    image_size: 64
    channels: 3
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
          # note: this isn't actually the resolution but
          # the downsampling factor, i.e. this corresponds to
          # attention on spatial resolution 8, 16, 32, as the
          # spatial resolution of the latents is 64 for f4
          - 8
          - 4
          - 2
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        ckpt_path: configs/first_stage_models/vq-f4/model.yaml
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.lsun.LSUNBedroomsTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNBedroomsValidation
      params:
        size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False

  trainer:
    benchmark: True
91 configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml (Normal file)
@@ -0,0 +1,91 @@
model:
  base_learning_rate: 5.0e-5   # set to target_lr by starting main.py with '--scale_lr False'
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    loss_type: l1
    first_stage_key: "image"
    cond_stage_key: "image"
    image_size: 32
    channels: 4
    cond_stage_trainable: False
    concat_mode: False
    scale_by_std: True
    monitor: 'val/loss_simple_ema'

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [10000]
        cycle_lengths: [10000000000000]
        f_start: [1.e-6]
        f_max: [1.]
        f_min: [1.]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 192
        attention_resolutions: [ 1, 2, 4, 8 ]   # 32, 16, 8, 4
        num_res_blocks: 2
        channel_mult: [ 1, 2, 2, 4, 4 ]  # 32, 16, 8, 4, 2
        num_heads: 8
        use_scale_shift_norm: True
        resblock_updown: True

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: "val/rec_loss"
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1, 2, 4, 4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config: "__is_unconditional__"

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 96
    num_workers: 5
    wrap: False
    train:
      target: ldm.data.lsun.LSUNChurchesTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNChurchesValidation
      params:
        size: 256

lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False


  trainer:
    benchmark: True
71 configs/latent-diffusion/txt2img-1p4B-eval.yaml (Normal file)
@@ -0,0 +1,71 @@
model:
  base_learning_rate: 5.0e-05
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.012
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions:
          - 4
          - 2
          - 1
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 4
          - 4
        num_heads: 8
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: true
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 32
@@ -1,36 +1,20 @@
 # This file describes the alternative machine learning models
-# available to InvokeAI script.
+# available to the dream script.
 #
 # To add a new model, follow the examples below. Each
 # model requires a model config file, a weights file,
 # and the width and height of the images it
 # was trained on.

 laion400m:
     config: configs/latent-diffusion/txt2img-1p4B-eval.yaml
     weights: models/ldm/text2img-large/model.ckpt
     description: Latent Diffusion LAION400M model
     width: 256
     height: 256
 stable-diffusion-1.4:
-    config: ./configs/stable-diffusion/v1-inference.yaml
-    weights: ./models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
-    vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
-    description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
-    width: 512
-    height: 512
-stable-diffusion-1.5:
-    description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
-    weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
-    config: ./configs/stable-diffusion/v1-inference.yaml
-    width: 512
-    height: 512
-    vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
-    default: true
-inpainting-1.5:
-    description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
-    weights: ./models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
-    config: ./configs/stable-diffusion/v1-inpainting-inference.yaml
-    width: 512
-    height: 512
-    vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
-waifu-diffusion-1.3:
-    description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
-    weights: ./models/ldm/stable-diffusion-v1/model-epoch09-float32.ckpt
-    config: ./configs/stable-diffusion/v1-inference.yaml
-    width: 512
-    height: 512
-    vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
+    config: configs/stable-diffusion/v1-inference.yaml
+    weights: models/ldm/stable-diffusion-v1/model.ckpt
+    description: Stable Diffusion inference model version 1.4
+    width: 512
+    height: 512
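Each entry in this file pairs a model name with `config`, `weights`, `description`, `width`, and `height` fields (plus optional `vae` and `default` keys). As an illustration only, not part of this changeset, here is a minimal Python sketch that reads such a file and flags incomplete entries; it assumes PyYAML is installed and that the field names are exactly those shown above:

```python
# Illustrative sketch: list models defined in a configs/models.yaml-style file.
import yaml

REQUIRED_KEYS = {"config", "weights", "description", "width", "height"}

def list_models(path="configs/models.yaml"):
    with open(path) as f:
        models = yaml.safe_load(f)          # top level maps model name -> fields
    for name, entry in models.items():
        marker = " (default)" if entry.get("default") else ""
        print(f"{name}{marker}: {entry.get('description', '<no description>')}")
        missing = REQUIRED_KEYS - entry.keys()
        if missing:                         # warn about incomplete stanzas
            print(f"  warning: missing keys: {', '.join(sorted(missing))}")

if __name__ == "__main__":
    list_models()
```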
68 configs/retrieval-augmented-diffusion/768x768.yaml (Normal file)
@@ -0,0 +1,68 @@
model:
  base_learning_rate: 0.0001
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: jpg
    cond_stage_key: nix
    image_size: 48
    channels: 16
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_by_std: false
    scale_factor: 0.22765929
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48
        in_channels: 16
        out_channels: 16
        model_channels: 448
        attention_resolutions:
          - 4
          - 2
          - 1
        num_res_blocks: 2
        channel_mult:
          - 1
          - 2
          - 3
          - 4
        use_scale_shift_norm: false
        resblock_updown: false
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: true
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        monitor: val/rec_loss
        embed_dim: 16
        ddconfig:
          double_z: true
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 1
            - 2
            - 2
            - 4
          num_res_blocks: 2
          attn_resolutions:
            - 16
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: torch.nn.Identity
@@ -76,4 +76,4 @@ model:
         target: torch.nn.Identity

     cond_stage_config:
-      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
@@ -1,79 +0,0 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid   # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    finetune_keys: null

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ['face', 'man', 'photo', 'africanmale']
        per_image_tokens: false
        num_vectors_per_token: 1
        progressive_words: False

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
@@ -1,74 +1,57 @@
-FROM ubuntu AS get_miniconda
+FROM debian

-SHELL ["/bin/bash", "-c"]
+ARG gsd
+ENV GITHUB_STABLE_DIFFUSION $gsd

-# install wget
-RUN apt-get update \
-    && apt-get install -y \
-        wget \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+ARG rsd
+ENV REQS $rsd

-# download and install miniconda
-ARG conda_version=py39_4.12.0-Linux-x86_64
-ARG conda_prefix=/opt/conda
-RUN wget --progress=dot:giga -O /miniconda.sh \
-    https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
-    && bash /miniconda.sh -b -p ${conda_prefix} \
-    && rm -f /miniconda.sh
+ARG cs
+ENV CONDA_SUBDIR $cs

-FROM ubuntu AS invokeai
-ENV PIP_EXISTS_ACTION="w"

-# use bash
-SHELL [ "/bin/bash", "-c" ]
+# TODO: Optimize image size

+# clean bashrc
+RUN echo "" > ~/.bashrc
+SHELL ["/bin/bash", "-c"]

-# Install necessary packages
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc \
-        git \
-        libgl1-mesa-glx \
-        libglib2.0-0 \
-        pip \
-        python3 \
-        python3-dev \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+WORKDIR /
+RUN apt update && apt upgrade -y \
+    && apt install -y \
+        git \
+        libgl1-mesa-glx \
+        libglib2.0-0 \
+        pip \
+        python3 \
+    && git clone $GITHUB_STABLE_DIFFUSION

-# clone repository and create symlinks
-ARG invokeai_git=https://github.com/invoke-ai/InvokeAI.git
-ARG project_name=invokeai
-RUN git clone ${invokeai_git} /${project_name} \
-    && mkdir /${project_name}/models/ldm/stable-diffusion-v1 \
-    && ln -s /data/models/sd-v1-4.ckpt /${project_name}/models/ldm/stable-diffusion-v1/model.ckpt \
-    && ln -s /data/outputs/ /${project_name}/outputs
+# Install Anaconda or Miniconda
+COPY anaconda.sh .
+RUN bash anaconda.sh -b -u -p /anaconda && /anaconda/bin/conda init bash

-# set workdir
-WORKDIR /${project_name}
+# SD
+WORKDIR /stable-diffusion
+RUN source ~/.bashrc \
+    && conda create -y --name ldm && conda activate ldm \
+    && conda config --env --set subdir $CONDA_SUBDIR \
+    && pip3 install -r $REQS \
+    && pip3 install basicsr facexlib realesrgan \
+    && mkdir models/ldm/stable-diffusion-v1 \
+    && ln -s "/data/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt

-# install conda env and preload models
-ARG conda_prefix=/opt/conda
-ARG conda_env_file=environment.yml
-COPY --from=get_miniconda ${conda_prefix} ${conda_prefix}
-RUN source ${conda_prefix}/etc/profile.d/conda.sh \
-    && conda init bash \
-    && source ~/.bashrc \
-    && conda env create \
-        --name ${project_name} \
-        --file ${conda_env_file} \
-    && rm -Rf ~/.cache \
-    && conda clean -afy \
-    && echo "conda activate ${project_name}" >> ~/.bashrc \
-    && ln -s /data/models/GFPGANv1.4.pth ./src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth \
-    && conda activate ${project_name} \
-    && python scripts/preload_models.py
+# Face restoration
+# by default expected in a sibling directory to stable-diffusion
+WORKDIR /
+RUN git clone https://github.com/TencentARC/GFPGAN.git

-# Copy entrypoint and set env
-ENV CONDA_PREFIX=${conda_prefix}
-ENV PROJECT_NAME=${project_name}
-COPY docker-build/entrypoint.sh /
-ENTRYPOINT [ "/entrypoint.sh" ]
+WORKDIR /GFPGAN
+RUN pip3 install -r requirements.txt \
+    && python3 setup.py develop \
+    && ln -s "/data/GFPGANv1.4.pth" experiments/pretrained_models/GFPGANv1.4.pth

+WORKDIR /stable-diffusion
+RUN python3 scripts/preload_models.py

+WORKDIR /
+COPY entrypoint.sh .
+ENTRYPOINT ["/entrypoint.sh"]
@@ -1,81 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
|
||||
# configure values by using env when executing build.sh
|
||||
# f.e. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh
|
||||
|
||||
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||
|
||||
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
|
||||
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
|
||||
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml}
|
||||
invokeai_git=${INVOKEAI_GIT:-https://github.com/invoke-ai/InvokeAI.git}
|
||||
huggingface_token=${HUGGINGFACE_TOKEN?}
|
||||
|
||||
# print the settings
|
||||
echo "You are using these values:"
|
||||
echo -e "project_name:\t\t ${project_name}"
|
||||
echo -e "volumename:\t\t ${volumename}"
|
||||
echo -e "arch:\t\t\t ${arch}"
|
||||
echo -e "platform:\t\t ${platform}"
|
||||
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
|
||||
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
|
||||
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
|
||||
echo -e "invokeai_git:\t\t ${invokeai_git}"
|
||||
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
|
||||
|
||||
_runAlpine() {
|
||||
docker run \
|
||||
--rm \
|
||||
--interactive \
|
||||
--tty \
|
||||
--mount source="$volumename",target=/data \
|
||||
--workdir /data \
|
||||
alpine "$@"
|
||||
}
|
||||
|
||||
_copyCheckpoints() {
|
||||
echo "creating subfolders for models and outputs"
|
||||
_runAlpine mkdir models
|
||||
_runAlpine mkdir outputs
|
||||
echo -n "downloading sd-v1-4.ckpt"
|
||||
_runAlpine wget --header="Authorization: Bearer ${huggingface_token}" -O models/sd-v1-4.ckpt https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
|
||||
echo "done"
|
||||
echo "downloading GFPGANv1.4.pth"
|
||||
_runAlpine wget -O models/GFPGANv1.4.pth https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
|
||||
}
|
||||
|
||||
_checkVolumeContent() {
|
||||
_runAlpine ls -lhA /data/models
|
||||
}
|
||||
|
||||
_getModelMd5s() {
|
||||
_runAlpine \
|
||||
alpine sh -c "md5sum /data/models/*"
|
||||
}
|
||||
|
||||
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
|
||||
echo "Volume already exists"
|
||||
if [[ -z "$(_checkVolumeContent)" ]]; then
|
||||
echo "looks empty, copying checkpoint"
|
||||
_copyCheckpoints
|
||||
fi
|
||||
echo "Models in ${volumename}:"
|
||||
_checkVolumeContent
|
||||
else
|
||||
echo -n "createing docker volume "
|
||||
docker volume create "${volumename}"
|
||||
_copyCheckpoints
|
||||
fi
|
||||
|
||||
# Build Container
|
||||
docker build \
|
||||
--platform="${platform}" \
|
||||
--tag "${invokeai_tag}" \
|
||||
--build-arg project_name="${project_name}" \
|
||||
--build-arg conda_version="${invokeai_conda_version}" \
|
||||
--build-arg conda_prefix="${invokeai_conda_prefix}" \
|
||||
--build-arg conda_env_file="${invokeai_conda_env_file}" \
|
||||
--build-arg invokeai_git="${invokeai_git}" \
|
||||
--file ./docker-build/Dockerfile \
|
||||
.
|
||||
@@ -1,8 +1,10 @@
 #!/bin/bash
 set -e

-source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
-conda activate "${PROJECT_NAME}"
+cd /stable-diffusion

-python scripts/invoke.py \
-    ${@:---web --host=0.0.0.0}
+if [ $# -eq 0 ]; then
+    python3 scripts/dream.py --full_precision -o /data
+    # bash
+else
+    python3 scripts/dream.py --full_precision -o /data "$@"
+fi
@@ -1,13 +0,0 @@
#!/usr/bin/env bash

project_name=${PROJECT_NAME:-invokeai}
volumename=${VOLUMENAME:-${project_name}_data}
arch=${ARCH:-x86_64}
platform=${PLATFORM:-Linux/${arch}}
invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}

export project_name
export volumename
export arch
export platform
export invokeai_tag
@@ -1,15 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||
|
||||
docker run \
|
||||
--interactive \
|
||||
--tty \
|
||||
--rm \
|
||||
--platform "$platform" \
|
||||
--name "$project_name" \
|
||||
--hostname "$project_name" \
|
||||
--mount source="$volumename",target=/data \
|
||||
--publish 9090:9090 \
|
||||
"$invokeai_tag" ${1:+$@}
|
||||
@@ -6,64 +6,64 @@ title: Changelog

## v2.0.1 (13 October 2022)

- fix noisy images at high step count when using k* samplers
- dream.py script now calls invoke.py module directly rather than
  via a new python process (which could break the environment)

## v2.0.0 <small>(9 October 2022)</small>

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
  for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
-- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
+- Support for [inpainting](features/INPAINTING.md) and [outpainting](features/OUTPAINTING.md)
- img2img runs on all k* samplers
-- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
+- Support for [negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
-- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
+- Support in both WebGUI and CLI for [post-processing of previously-generated images](features/POSTPROCESS.md)
  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
  and "embiggen" upscaling. See the `!fix` command.
-- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.m#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
+- New `--hires` option on `invoke>` line allows [larger images to be created without duplicating elements](features/CLI.md#this-is-an-example-of-txt2img), at the cost of some performance.
-- New `--perlin` and `--threshold` options allow you to add and control variation
-  during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
+- New `--perlin` and `--threshold` options allow you to add and control variation
+  during image generation (see [Thresholding and Perlin Noise Initialization](features/OTHER.md#thresholding-and-perlin-noise-initialization-options))
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
  and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
-- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.m">command-line completion behavior</a>.
+- Improved [command-line completion behavior](features/CLI.md)
  New commands added:
-  * List command-line history with `!history`
-  * Search command-line history with `!search`
-  * Clear history with `!clear`
+  - List command-line history with `!history`
+  - Search command-line history with `!search`
+  - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto use the new flag like `--precision=float32`.

## v1.14 <small>(11 September 2022)</small>

- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
  ([prixt](https://github.com/prixt)).
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.

## v1.13 <small>(3 September 2022)</small>

- Support image variations (see [VARIATIONS](features/VARIATIONS.md))
  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
  [Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
  [Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
  [Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming
  stable-diffusion-v1.5) to be added without altering the code.
  ([David Wager](https://github.com/maddavid12))
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.

---

@@ -88,7 +88,7 @@ title: Changelog
  Seed memory only extends back to the previous command, but will work on all images generated with the -n# switch.
- Variant generation support temporarily disabled pending more general solution.
- Created a feature branch named **yunsaki-morphing-invoke** which adds experimental support for
  iteratively modifying the prompt and its parameters. Please see [Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
  for a synopsis of how this works. Note that when this feature is eventually added to the main branch, it may be modified
  significantly.
(Binary diff placeholders: 23 documentation images deleted; file sizes range from 11 KiB to 598 KiB.)
@@ -1,143 +0,0 @@
|
||||
---
|
||||
title: Changelog
|
||||
---
|
||||
|
||||
# :octicons-log-16: Changelog
|
||||
|
||||
## v1.13
|
||||
|
||||
- Supports a Google Colab notebook for a standalone server running on Google
|
||||
hardware [Arturo Mendivil](https://github.com/artmen1516)
|
||||
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- WebUI supports incremental display of in-progress images during generation
|
||||
[Kevin Gibbons](https://github.com/bakkot)
|
||||
- Output directory can be specified on the invoke> command line.
|
||||
- The grid was displaying duplicated images when not enough images to fill the
|
||||
final row [Muhammad Usama](https://github.com/SMUsamaShah)
|
||||
- Can specify --grid on invoke.py command line as the default.
|
||||
- Miscellaneous internal bug and stability fixes.
|
||||
|
||||
---
|
||||
|
||||
## v1.12 <small>(28 August 2022)</small>
|
||||
|
||||
- Improved file handling, including ability to read prompts from standard input.
|
||||
(kudos to [Yunsaki](https://github.com/yunsaki)
|
||||
- The web server is now integrated with the invoke.py script. Invoke by adding
|
||||
--web to the invoke.py command arguments.
|
||||
- Face restoration and upscaling via GFPGAN and Real-ESGAN are now automatically
|
||||
enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
|
||||
VRAM requirements are modestly reduced. Thanks to both
|
||||
[Blessedcoolant](https://github.com/blessedcoolant) and
|
||||
[Oceanswave](https://github.com/oceanswave) for their work on this.
|
||||
- You can now swap samplers on the invoke> command line.
|
||||
[Blessedcoolant](https://github.com/blessedcoolant)
|
||||
|
||||
---
|
||||
|
||||
## v1.11 <small>(26 August 2022)</small>
|
||||
|
||||
- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module.
|
||||
(kudos to [Oceanswave](https://github.com/Oceanswave))
|
||||
- You now can specify a seed of -1 to use the previous image's seed, -2 to use
|
||||
the seed for the image generated before that, etc. Seed memory only extends
|
||||
back to the previous command, but will work on all images generated with the
|
||||
-n# switch.
|
||||
- Variant generation support temporarily disabled pending more general solution.
|
||||
- Created a feature branch named **yunsaki-morphing-invoke** which adds
|
||||
experimental support for iteratively modifying the prompt and its parameters.
|
||||
Please
|
||||
see[ Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86) for
|
||||
a synopsis of how this works. Note that when this feature is eventually added
|
||||
to the main branch, it will may be modified significantly.
|
||||
|
||||
---
|
||||
|
||||
## v1.10 <small>(25 August 2022)</small>
|
||||
|
||||
- A barebones but fully functional interactive web server for online generation
|
||||
of txt2img and img2img.
|
||||
|
||||
---
|
||||
|
||||
## v1.09 <small>(24 August 2022)</small>
|
||||
|
||||
- A new -v option allows you to generate multiple variants of an initial image
|
||||
in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave).
|
||||
- [See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
|
||||
- Added ability to personalize text to image generation (kudos to
|
||||
[Oceanswave](https://github.com/Oceanswave) and
|
||||
[nicolai256](https://github.com/nicolai256))
|
||||
- Enabled all of the samplers from k_diffusion
|
||||
|
||||
---
|
||||
|
||||
## v1.08 <small>(24 August 2022)</small>
|
||||
|
||||
- Escape single quotes on the invoke> command before trying to parse. This avoids
|
||||
parse errors.
|
||||
- Removed instruction to get Python3.8 as first step in Windows install.
|
||||
Anaconda3 does it for you.
|
||||
- Added bounds checks for numeric arguments that could cause crashes.
|
||||
- Cleaned up the copyright and license agreement files.
|
||||
|
||||
---
|
||||
|
||||
## v1.07 <small>(23 August 2022)</small>
|
||||
|
||||
- Image filenames will now never fill gaps in the sequence, but will be assigned
|
||||
the next higher name in the chosen directory. This ensures that the alphabetic
|
||||
and chronological sort orders are the same.
|
||||
|
||||
---
|
||||
|
||||
## v1.06 <small>(23 August 2022)</small>
|
||||
|
||||
- Added weighted prompt support contributed by
|
||||
[xraxra](https://github.com/xraxra)
|
||||
- Example of using weighted prompts to tweak a demonic figure contributed by
|
||||
[bmaltais](https://github.com/bmaltais)
|
||||
|
||||
---
|
||||
|
||||
## v1.05 <small>(22 August 2022 - after the drop)</small>
|
||||
|
||||
- Filenames now use the following formats: 000010.95183149.png -- Two files
|
||||
produced by the same command (e.g. -n2), 000010.26742632.png -- distinguished
|
||||
by a different seed.
|
||||
000011.455191342.01.png -- Two files produced by the same command using
|
||||
000011.455191342.02.png -- a batch size>1 (e.g. -b2). They have the same seed.
|
||||
000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid
|
||||
can be regenerated with the indicated key
|
||||
|
||||
- It should no longer be possible for one image to overwrite another
|
||||
- You can use the "cd" and "pwd" commands at the invoke> prompt to set and
|
||||
retrieve the path of the output directory.
|
||||
|
||||
## v1.04 <small>(22 August 2022 - after the drop)</small>
|
||||
|
||||
- Updated README to reflect installation of the released weights.
|
||||
- Suppressed very noisy and inconsequential warning when loading the frozen CLIP
|
||||
tokenizer.
|
||||
|
||||
## v1.03 <small>(22 August 2022)</small>
|
||||
|
||||
- The original txt2img and img2img scripts from the CompViz repository have been
|
||||
moved into a subfolder named "orig_scripts", to reduce confusion.
|
||||
|
||||
## v1.02 <small>(21 August 2022)</small>
|
||||
|
||||
- A copy of the prompt and all of its switches and options is now stored in the
|
||||
corresponding image in a tEXt metadata field named "Dream". You can read the
|
||||
prompt using scripts/images2prompt.py, or an image editor that allows you to
|
||||
explore the full metadata. **Please run "conda env update -f environment.yaml"
|
||||
to load the k_lms dependencies!!**
|
||||
|
||||
## v1.01 <small>(21 August 2022)</small>
|
||||
|
||||
- added k_lms sampling. **Please run "conda env update -f environment.yaml" to
|
||||
load the k_lms dependencies!!**
|
||||
- use half precision arithmetic by default, resulting in faster execution and
|
||||
lower memory requirements Pass argument --full_precision to invoke.py to get
|
||||
slower but more accurate image generation
|
||||
@@ -8,7 +8,7 @@ hide:

## **Interactive Command Line Interface**

-The `invoke.py` script, located in `scripts/`, provides an interactive
+The `invoke.py` script, located in `scripts/dream.py`, provides an interactive
interface to image generation similar to the "invoke mothership" bot that Stable
AI provided on its Discord server.

@@ -86,7 +86,6 @@ overridden on a per-prompt basis (see [List of prompt arguments](#list-of-prompt
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--png_compression <0-9>` | `-z<0-9>` | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--safety-checker` | | False | Activate safety checker for NSFW and other potentially disturbing imagery |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |

@@ -98,12 +97,11 @@ overridden on a per-prompt basis (see [List of prompt arguments](#list-of-prompt
| `--embedding_path <path>` | | `None` | Path to pre-trained embedding manager checkpoints, for custom models |
| `--gfpgan_dir` | | `src/gfpgan` | Path to where GFPGAN is installed. |
| `--gfpgan_model_path` | | `experiments/pretrained_models/GFPGANv1.4.pth` | Path to GFPGAN model file, relative to `--gfpgan_dir`. |
| `--device <device>` | `-d<device>` | `torch.cuda.current_device()` | Device to run SD on, e.g. "cuda:0" |
| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions |
| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast |

-!!! warning deprecated
-
-    These arguments are deprecated but still work:
+!!! warning "These arguments are deprecated but still work"

<div align="center" markdown>

@@ -132,7 +130,7 @@ from text ([txt2img](#txt2img)), to embellish an existing image or sketch

### txt2img

-!!! example
+!!! example ""

    ```bash
    invoke> waterfall and rainbow -W640 -H480
    ```

@@ -153,14 +151,12 @@ Here are the invoke> commands that apply to txt2img:
| --cfg_scale <float>| -C<float> | 7.5 | How hard to try to match the prompt to the generated image; any number greater than 1.0 works, but the useful range is roughly 5.0 to 20.0 |
| --seed <int> | -S<int> | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously.|
| --sampler <sampler>| -A<sampler>| k_lms | Sampler to use. Use -h to get list of available samplers. |
| --karras_max <int> | | 29 | When using k_* samplers, set the maximum number of steps before shifting from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). This value is sticky. [29] |
| --hires_fix | | | Larger images often have duplication artefacts. This option suppresses duplicates by generating the image at low res, and then using img2img to increase the resolution |
-| --png_compression <0-9> | -z<0-9> | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
+| `--png_compression <0-9>` | `-z<0-9>` | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt |
| --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) |
| --outdir <path> | -o<path> | outputs/img_samples | Temporarily change the location of these images |
| --seamless | | False | Activate seamless tiling for interesting effects |
| --seamless_axes | | x,y | Specify which axes to use circular convolution on. |
| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
| --skip_normalization| -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
| --upscale <int> <float> | -U <int> <float> | -U 1 0.75| Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength not set, will default to 0.75. |

@@ -200,7 +196,7 @@ accepts additional options:

### inpainting

-!!! example
+!!! example ""

    ```bash
    invoke> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
    ```

@@ -214,40 +210,11 @@ accepts additional options:
[Inpainting](./INPAINTING.md) for details.

inpainting accepts all the arguments used for txt2img and img2img, as
-well as the --mask (-M) and --text_mask (-tm) arguments:
+well as the --mask (-M) argument:

| Argument <img width="100" align="right"/> | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--init_mask <path>` | `-M<path>` | `None` | Path to an image the same size as the initial_image, with areas for inpainting made transparent. |
| `--invert_mask` | | False | If true, invert the mask so that transparent areas are opaque and vice versa. |
| `--text_mask <prompt> [<float>]` | `-tm <prompt> [<float>]` | <none> | Create a mask from a text prompt describing part of the image |

The mask may either be an image with transparent areas, in which case
the inpainting will occur in the transparent areas only, or a black
and white image, in which case all black areas will be painted into.
`--text_mask` (short form `-tm`) is a way to generate a mask using a
text description of the part of the image to replace. For example, if
you have an image of a breakfast plate with a bagel, toast and
scrambled eggs, you can selectively mask the bagel and replace it with
a piece of cake this way:

~~~
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
~~~

The algorithm uses [clipseg](https://github.com/timojl/clipseg) to classify
different regions of the image. The classifier puts out a confidence
score for each region it identifies. Generally regions that score
above 0.5 are reliable, but if you are getting too much or too little
masking you can adjust the threshold down (to get more mask), or up
(to get less). In this example, by passing `-tm` a higher value, we
are insisting on a more stringent classification.

~~~
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
~~~

# Other Commands

@@ -289,20 +256,12 @@ Some examples:
Outputs:
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8

-### !mask
-
-This command takes an image, a text prompt, and uses the `clipseg`
-algorithm to automatically generate a mask of the area that matches
-the text prompt. It is useful for debugging the text masking process
-prior to inpainting with the `--text_mask` argument. See
-[INPAINTING.md] for details.
-
-## Model selection and importation
+# Model selection and importation

The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script.

-### !models
+## !models

This prints out a list of the models defined in `config/models.yaml`.
The active model is bold-faced.

@@ -314,7 +273,7 @@ laion400m not loaded <no description>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>

-### !switch <model>
+## !switch <model>

This quickly switches from one model to another without leaving the
CLI script. `invoke.py` uses a memory caching system; once a model

@@ -360,7 +319,7 @@ laion400m not loaded <no description>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>

-### !import_model <path/to/model/weights>
+## !import_model <path/to/model/weights>

This command imports a new model weights file into InvokeAI, makes it
available for image generation within the script, and writes out the

@@ -385,7 +344,7 @@ automatically.
Example:

<pre>
-invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
+invoke> <b>!import_model models/ldm/stable-diffusion-v1/ model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:

Name for this model: <b>waifu-diffusion</b>

@@ -412,7 +371,7 @@ OK to import [n]? <b>y</b>
invoke>
</pre>

-###!edit_model <name_of_model>
+##!edit_model <name_of_model>

The `!edit_model` command can be used to modify a model that is
already defined in `config/models.yaml`. Call it with the short

@@ -448,12 +407,20 @@ OK to import [n]? y
Outputs:
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
```

-## History processing
+# History processing

The CLI provides a series of convenient commands for reviewing previous
actions, retrieving them, modifying them, and re-running them.

+```bash
+invoke> !fetch 0000015.8929913.png
+# the script returns the next line, ready for editing and running:
+invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
+```
+
+Note that this command may behave unexpectedly if given a PNG file that
+was not generated by InvokeAI.

-### !history
+### `!history`

The invoke script keeps track of all the commands you issue during a
session, allowing you to re-run them. On Mac and Linux systems, it

@@ -478,41 +445,20 @@ invoke> !20
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```

-### !fetch
+## !fetch

This command retrieves the generation parameters from a previously
-generated image and either loads them into the command line
-(Linux|Mac), or prints them out in a comment for copy-and-paste
-(Windows). You may provide either the name of a file in the current
-output directory, or a full file path. Specify path to a folder with
-image png files, and wildcard *.png to retrieve the dream command used
-to generate the images, and save them to a file commands.txt for
-further processing.
+generated image and loads them into the command line. You may
+provide either the name of a file in the current output directory, or
+a full file path.

This example loads the generation command for a single png file:

~~~
invoke> !fetch 0000015.8929913.png
# the script returns the next line, ready for editing and running:
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
~~~

-This one fetches the generation commands from a batch of files and
-stores them into `selected.txt`:
-
-```bash
-invoke> !fetch outputs\selected-imgs\*.png selected.txt
-```
-
-### !replay
-
-This command replays a text file generated by !fetch or created manually
-
-~~~
-invoke> !replay outputs\selected-imgs\selected.txt
-~~~
-
-Note that these commands may behave unexpectedly if given a PNG file that
+Note that this command may behave unexpectedly if given a PNG file that
was not generated by InvokeAI.

### !search <search string>
@@ -17,15 +17,15 @@ tree on a hill with a river, nature photograph, national geographic -I./test-pic
|
||||
|
||||
This will take the original image shown here:
|
||||
|
||||
<div align="center" markdown>
|
||||
<figure markdown>
|
||||
<img src="https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png" width=350>
|
||||
</div>
|
||||
</figure>
|
||||
|
||||
and generate a new image based on it as shown here:
|
||||
|
||||
<div align="center" markdown>
|
||||
<figure markdown>
|
||||
<img src="https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png" width=350>
|
||||
</div>
|
||||
</figure>
|
||||
|
||||
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength` (`-f`) controls how much
|
||||
the original will be modified, ranging from `0.0` (keep the original intact), to `1.0` (ignore the

@@ -41,11 +41,10 @@ interesting variants.

Note that the prompt makes a big difference. For example, this slight variation on the prompt produces
a very different image:

`photograph of a tree on a hill with a river`

<figure markdown>
<img src="https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png" width=350>
<caption markdown>photograph of a tree on a hill with a river</caption>
</figure>

!!! tip

@@ -79,9 +78,9 @@ gaussian noise and progressively refines it over the requested number of steps,

invoke> "fire" -s10 -W384 -H384 -S1592514025
```

<figure markdown>

</figure>

Put simply: starting from a frame of fuzz/static, SD finds details in each frame that it thinks look like "fire" and brings them a little bit more into focus, gradually scrubbing out the fuzz until a clear image remains.

@@ -91,21 +90,21 @@ Put simply: starting from a frame of fuzz/static, SD finds details in each frame

I want SD to draw a fire based on this hand-drawn image:

<figure markdown>

</figure>

Let's only do 10 steps, to make it easier to see what's happening. If strength is `0.7`, this is what the internal steps the algorithm has to take will look like:

<figure markdown>

</figure>

With strength `0.4`, the steps look more like this:

<figure markdown>

</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to `0.4`, and notice also how much longer the sequence is with `0.7`:

@@ -121,6 +120,8 @@ Both of the outputs look kind of like what I was thinking of. With the strength

If you want to try this out yourself, all of these are using a seed of `1592514025` with a width/height of `384`, step count `10`, the default sampler (`k_lms`), and the single-word prompt `"fire"`:

```commandline
invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
```
@@ -137,9 +138,9 @@ Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure S

invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
```

<figure markdown>

</figure>

and here is strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` to make sure SD does `20` steps from my image):

@@ -147,29 +148,38 @@ and here is strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` t

invoke> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
```

<figure markdown>

</figure>

In both cases the image is nice and clean and "finished", but because at strength `0.7` Stable Diffusion has been given so much more freedom to improve on my badly-drawn flames, they've come out looking much better. You can really see the difference when looking at the latent steps. There's more noise on the first image with strength `0.7`:

<figure markdown>

</figure>

than there is for strength `0.4`:

<figure markdown>

</figure>

and that extra noise gives the algorithm more choices when it is evaluating how to denoise any particular pixel in the image.

Unfortunately, it seems that `img2img` is very sensitive to the step count. Here's strength `0.7` with a step count of `29` (SD did 19 steps from my image):

<figure markdown>

</figure>

By comparing the latents we can sort of see that something got interpreted differently enough on the third or fourth step to lead to a rather different interpretation of the flames.

<figure markdown>

</figure>

<figure markdown>

</figure>

This is the result of a difference in the de-noising "schedule" - basically the noise has to be cleaned by a certain degree each step or the model won't "converge" on the image properly (see [stable diffusion blog](https://huggingface.co/blog/stable_diffusion) for more about that). A different step count means a different schedule, which means things get interpreted slightly differently at every step.
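
To make the step arithmetic above explicit: img2img performs roughly
`steps × strength` denoising steps on your image, so you request about
`desired ÷ strength` total steps. The one-liners below are just a calculator
for the examples in this section, not InvokeAI code:

```bash
python -c "print(20 / 0.7)"   # ~28.6, rounded up to the -s30 used above
python -c "print(20 / 0.4)"   # 50.0, matching -s50
```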

@@ -34,188 +34,9 @@ original unedited image and the masked (partially transparent) image:

invoke> "man with cat on shoulder" -I./images/man.png -M./images/man-transparent.png
```

We are hoping to get rid of the need for this workaround in an upcoming release.

## **Masking using Text**

You can also create a mask using a text prompt to select the part of
the image you want to alter, using the <a
href="https://github.com/timojl/clipseg">clipseg</a> algorithm. This
works on any image, not just ones generated by InvokeAI.

The `--text_mask` (short form `-tm`) option takes two arguments. The
first argument is a text description of the part of the image you wish
to mask (paint over). If the text description contains a space, you must
surround it with quotation marks. The optional second argument is the
minimum threshold for the mask classifier's confidence score, described
in more detail below.

To see how this works in practice, here's an image of a still life
painting that I got off the web.

<img src="../assets/still-life-scaled.jpg">

You can selectively mask out the
orange and replace it with a baseball in this way:

~~~
invoke> a baseball -I /path/to/still_life.png -tm orange
~~~

<img src="../assets/still-life-inpainted.png">

The clipseg classifier produces a confidence score for each region it
identifies. Generally regions that score above 0.5 are reliable, but
if you are getting too much or too little masking you can adjust the
threshold down (to get more mask), or up (to get less). In this
example, by passing `-tm` a higher value, we are insisting on a tighter
mask. However, if you make it too high, the orange may not be picked
up at all!

~~~
invoke> a baseball -I /path/to/breakfast.png -tm orange 0.6
~~~

The `!mask` command may be useful for debugging problems with the
text2mask feature. The syntax is `!mask /path/to/image.png -tm <text>
<threshold>`

It will generate three files:

- The image with the selected area highlighted.
    - it will be named XXXXX.<imagename>.<prompt>.selected.png
- The image with the un-selected area highlighted.
    - it will be named XXXXX.<imagename>.<prompt>.deselected.png
- The image with the selected area converted into a black and white
  image according to the threshold level
    - it will be named XXXXX.<imagename>.<prompt>.masked.png

The `.masked.png` file can then be directly passed to the `invoke>`
prompt in the CLI via the `-M` argument. Do not attempt this with
the `selected.png` or `deselected.png` files, as they contain some
transparency throughout the image and will not produce the desired
results.

Here is an example of how `!mask` works:

```
invoke> !mask ./test-pictures/curly.png -tm hair 0.5
>> generating masks from ./test-pictures/curly.png
>> Initializing clipseg model for text to mask inference
Outputs:
[941.1] outputs/img-samples/000019.curly.hair.deselected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.2] outputs/img-samples/000019.curly.hair.selected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.3] outputs/img-samples/000019.curly.hair.masked.png: !mask ./test-pictures/curly.png -tm hair 0.5
```

**Original image "curly.png"**
<img src="../assets/outpainting/curly.png">

**000019.curly.hair.selected.png**
<img src="../assets/inpainting/000019.curly.hair.selected.png">

**000019.curly.hair.deselected.png**
<img src="../assets/inpainting/000019.curly.hair.deselected.png">

**000019.curly.hair.masked.png**
<img src="../assets/inpainting/000019.curly.hair.masked.png">

It looks like we selected the hair pretty well at the 0.5 threshold
(which is the default, so we didn't actually have to specify it), so
let's have some fun:

```
invoke> medusa with cobras -I ./test-pictures/curly.png -M 000019.curly.hair.masked.png -C20
>> loaded input image of size 512x512 from ./test-pictures/curly.png
...
Outputs:
[946] outputs/img-samples/000024.801380492.png: "medusa with cobras" -s 50 -S 801380492 -W 512 -H 512 -C 20.0 -I ./test-pictures/curly.png -A k_lms -f 0.75
```

<img src="../assets/inpainting/000024.801380492.png">

You can also skip the `!mask` creation step and just select the masked
region directly:

```
invoke> medusa with cobras -I ./test-pictures/curly.png -tm hair -C20
```

## Using the RunwayML inpainting model

The [RunwayML Inpainting Model
v1.5](https://huggingface.co/runwayml/stable-diffusion-inpainting) is
a specialized version of [Stable Diffusion
v1.5](https://huggingface.co/spaces/runwayml/stable-diffusion-v1-5)
that contains extra channels specifically designed to enhance
inpainting and outpainting. While it can do regular `txt2img` and
`img2img`, it really shines when filling in missing regions. It has an
almost uncanny ability to blend the new regions with existing ones in
a semantically coherent way.

To install the inpainting model, follow the
[instructions](INSTALLING-MODELS.md) for installing a new model. You
may use either the CLI (`invoke.py` script) or directly edit the
`configs/models.yaml` configuration file to do this. The main thing to
watch out for is that the model `config` option must be set up to
use `v1-inpainting-inference.yaml` rather than the `v1-inference.yaml`
file that is used by Stable Diffusion 1.4 and 1.5.

After installation, your `models.yaml` should contain an entry that
looks like this one:

```yml
inpainting-1.5:
  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
  description: SD inpainting v1.5
  config: configs/stable-diffusion/v1-inpainting-inference.yaml
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  width: 512
  height: 512
```

As shown in the example, you may include a VAE fine-tuning weights
file as well. This is strongly recommended.

To use the custom inpainting model, launch `invoke.py` with the
argument `--model inpainting-1.5` or alternatively from within the
script use the `!switch inpainting-1.5` command to load and switch to
the inpainting model.
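
For example, either of the following (as described above) will get you the
inpainting model:

```bash
python scripts/invoke.py --model inpainting-1.5
# or, from within a running session:
invoke> !switch inpainting-1.5
```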

You can now do inpainting and outpainting exactly as described above,
but there will (likely) be a noticeable improvement in
coherence. Txt2img and Img2img will work as well.

There are a few caveats to be aware of:

1. The inpainting model is larger than the standard model, and will
   use nearly 4 GB of GPU VRAM. This makes it unlikely to run on
   a 4 GB graphics card.

2. When operating in Img2img mode, the inpainting model is much less
   steerable than the standard model. It is great for making small
   changes, such as changing the pattern of a fabric, or slightly
   changing a subject's expression or hair, but the model will
   resist making the dramatic alterations that the standard
   model lets you do.

3. While the `--hires` option works fine with the inpainting model,
   some special features, such as `--embiggen`, are disabled.

4. Prompt weighting (`banana++ sushi`) and merging work well with
   the inpainting model, but prompt swapping (`a ("fluffy cat").swap("smiling dog") eating a hotdog`)
   will not have any effect due to the way the model is set up.
   You may use text masking (with `-tm thing-to-mask`) as an
   effective replacement.

5. The model tends to oversharpen images if you use high step or CFG
   values. If you need to do large steps, use the standard model.

6. The `--strength` (`-f`) option has no effect on the inpainting
   model due to its fundamental differences with the standard
   model. It will always take the full number of steps you specify.

## Troubleshooting

Here are some troubleshooting tips for inpainting and outpainting.

### Inpainting is not changing the masked region enough!

One of the things to understand about how inpainting works is that it
is equivalent to running img2img on just the masked (transparent)
@@ -257,28 +78,40 @@ surrounding unmasked regions as well.

1. Open image in Photoshop

<figure markdown>

</figure>

2. Use any of the selection tools (Marquee, Lasso, or Wand) to select the area you desire to inpaint.

<figure markdown>

</figure>

3. Because we'll be applying a mask over the area we want to preserve, you should now select the inverse by using the ++shift+ctrl+i++ shortcut, or right clicking and using the "Select Inverse" option.

4. You'll now create a mask by selecting the image layer, and Masking the selection. Make sure that you don't delete any of the underlying image, or your inpainting results will be dramatically impacted.

<figure markdown>

</figure>

5. Make sure to hide any background layers that are present. You should see the mask applied to your image layer, and the image on your canvas should display the checkered background.

<figure markdown>

</figure>

6. Save the image as a transparent PNG by using `File`-->`Save a Copy` from the menu bar, or by using the keyboard shortcut ++alt+ctrl+s++

<figure markdown>

</figure>

7. After following the inpainting instructions above (either through the CLI or the Web UI), marvel at your newfound ability to selectively invoke. Lookin' good!

<figure markdown>

</figure>

8. In the export dialogue, make sure the "Save colour values from transparent pixels" checkbox is selected.

@@ -26,12 +26,6 @@ for each `invoke>` prompt as shown here:

invoke> "pond garden with lotus by claude monet" --seamless -s100 -n4
```

By default this will tile on both the X and Y axes. However, you can also specify specific axes to tile on with `--seamless_axes`.
Possible values are `x`, `y`, and `x,y`:

```bash
invoke> "pond garden with lotus by claude monet" --seamless --seamless_axes=x -s100 -n4
```

---

## **Shortcuts: Reusing Seeds**

@@ -75,23 +69,6 @@ combination of integers and floating point numbers, and they do not need to add

---

## **Filename Format**

The argument `--fnformat` lets you specify the filename of the
image. Supported wildcards are all arguments that can be set, such as
`perlin`, `seed`, `threshold`, `height`, `width`, `gfpgan_strength`,
`sampler_name`, `steps`, `model`, `upscale`, `prompt`, `cfg_scale`,
`prefix`.

The following prompt

```bash
dream> a red car --steps 25 -C 9.8 --perlin 0.1 --fnformat {prompt}_steps.{steps}_cfg.{cfg_scale}_perlin.{perlin}.png
```

generates a file with the name: `outputs/img-samples/a red car_steps.25_cfg.9.8_perlin.0.1.png`

---

## **Thresholding and Perlin Noise Initialization Options**

Two new options are the thresholding (`--threshold`) and the perlin noise initialization (`--perlin`) options. Thresholding limits the range of the latent values during optimization, which helps combat oversaturation with higher CFG scale values. Perlin noise initialization starts with a percentage (a value ranging from 0 to 1) of perlin noise mixed into the initial noise. Both features allow for more variations and options in the course of generating images.
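
For example, the following is an illustrative command (the prompt and values
are not from the original examples) that combines both options:

```bash
invoke> "a sunset over the ocean" -s50 -C12 --threshold 4 --perlin 0.2
```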

@@ -15,58 +15,19 @@ InvokeAI supports two versions of outpainting, one called "outpaint"
and the other "outcrop." They work slightly differently and each has
its advantages and drawbacks.

### Outpainting

Outpainting is the same as inpainting, except that the painting occurs
in the regions outside of the original image. To outpaint using the
`invoke.py` command line script, prepare an image in which the borders
to be extended are pure black. Add an alpha channel (if there isn't one
already), and make the borders completely transparent and the interior
completely opaque. If you wish to modify the interior as well, you may
create transparent holes in the transparency layer, which `img2img` will
paint into as usual.

Pass the image as the argument to the `-I` switch as you would for
regular inpainting:

```bash
invoke> a stream by a river -I /path/to/transparent_img.png
```

You'll likely be delighted by the results.

### Tips

1. Do not try to expand the image too much at once. Generally it is best
   to expand the margins in 64-pixel increments. 128 pixels often works,
   but your mileage may vary depending on the nature of the image you are
   trying to outpaint into.

2. There is a series of switches that can be used to adjust how the
   inpainting algorithm operates. In particular, you can use these to
   minimize the seam that sometimes appears between the original image
   and the extended part, as in the sketch after this list. These switches are:

   ```
   --seam_size SEAM_SIZE      Size of the mask around the seam between original and outpainted image (0)
   --seam_blur SEAM_BLUR      The amount to blur the seam inwards (0)
   --seam_strength STRENGTH   The img2img strength to use when filling the seam (0.7)
   --seam_steps SEAM_STEPS    The number of steps to use to fill the seam. (10)
   --tile_size TILE_SIZE      The tile size to use for filling outpaint areas (32)
   ```
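
A sketch of how these switches might be combined with the outpainting command
shown earlier (the specific values here are illustrative):

```bash
invoke> a stream by a river -I /path/to/transparent_img.png --seam_size 16 --seam_blur 8 --seam_steps 30
```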

### Outcrop

The `outcrop` extension gives you a convenient `!fix` postprocessing
command that allows you to extend a previously-generated image in 64
pixel increments in any direction. You can apply the module to any
image previously-generated by InvokeAI. Note that it works with
arbitrary PNG photographs, but not currently with JPG or other
formats. Outcropping is particularly effective when combined with the
[runwayML custom inpainting
model](INPAINTING.md#using-the-runwayml-inpainting-model).

Consider this image:

<figure markdown>

</figure>

Pretty nice, but it's annoying that the top of her head is cut
off. She's also a bit off center. Let's fix that!

@@ -83,9 +44,9 @@ specify any number of pixels to extend. You can also abbreviate

The result looks like this:

<figure markdown>

</figure>

The new image is actually slightly larger than the original (576x576,
because 64 pixels were added to the top and right sides.)

@@ -103,3 +64,42 @@ you'll get a slightly different result. You can run it repeatedly
until you get an image you like. Unfortunately `!fix` does not
currently respect the `-n` (`--iterations`) argument.

## Outpaint

The `outpaint` extension does the same thing, but with subtle
differences. Starting with the same image, here is how we would add an
additional 64 pixels to the top of the image:

```bash
invoke> !fix images/curly.png --out_direction top 64
```

(You can abbreviate `--out_direction` as `-D`.)

The result is shown here:

<figure markdown>

</figure>

Although the effect is similar, there are significant differences from
outcropping:

- You can only specify one direction to extend at a time.
- The image is **not** resized. Instead, the image is shifted by the specified
  number of pixels. If you look carefully, you'll see that less of the lady's
  torso is visible in the image.
- Because the image dimensions remain the same, there's no rounding
  to multiples of 64.
- Attempting to outpaint larger areas will frequently give rise to ugly
  ghosting effects.
- For best results, try increasing the step number.
- If you don't specify a pixel value in `-D`, it will default to half
  of the whole image, which is likely not what you want.

!!! tip

    Neither `outpaint` nor `outcrop` are perfect, but we continue to tune
    and improve them. If one doesn't work, try the other. You may also
    wish to experiment with other `img2img` arguments, such as `-C`, `-f`
    and `-s`.

@@ -45,35 +45,35 @@ Here's a prompt that depicts what it does.

original prompt:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

</figure>

That image has a woman, so if we want the horse without a rider, we can influence the image not to have a woman by putting [woman] in the prompt, like this:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

</figure>

That's nice - but say we also don't want the image to be quite so blue. We can add "blue" to the list of negative prompts, so it's now [woman blue]:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

</figure>

Getting close - but there's no sense in having a saddle when our horse doesn't have a rider, so we'll add one more negative prompt: [woman blue saddle].

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

</figure>

!!! notes "Notes about this feature:"

@@ -84,109 +84,6 @@ Getting close - but there's no sense in having a saddle when our horse doesn't h

---

## **Prompt Syntax Features**

The InvokeAI prompting language has the following features:

### Attention weighting

Append a word or phrase with `-` or `+`, or a weight between `0` and `2` (`1`=default), to decrease or increase "attention" (= a mix of per-token CFG weighting multiplier and, for `-`, a weighted blend with the prompt without the term).

The following syntax is recognised:

* single words without parentheses: `a tall thin man picking apricots+`
* single or multiple words with parentheses: `a tall thin man picking (apricots)+` `a tall thin man picking (apricots)-` `a tall thin man (picking apricots)+` `a tall thin man (picking apricots)-`
* more effect with more symbols: `a tall thin man (picking apricots)++`
* nesting: `a tall thin man (picking apricots+)++` (`apricots` effectively gets `+++`)
* all of the above with explicit numbers: `a tall thin man picking (apricots)1.1` `a tall thin man (picking (apricots)1.3)1.1`. (`+` is equivalent to 1.1, `++` is pow(1.1,2), `+++` is pow(1.1,3), etc; `-` means 0.9, `--` means pow(0.9,2), etc.)
* attention also applies to `[unconditioning]`, so `a tall thin man picking apricots [(ladder)0.01]` will *very gently* nudge SD away from trying to draw the man on a ladder

You can use this to increase or decrease the amount of something. Starting from this prompt of `a man picking apricots from a tree`, let's see what happens if we increase and decrease how much attention we want Stable Diffusion to pay to the word `apricots`:



Using `-` to reduce apricot-ness:

| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |
| -- | -- | -- |
|  |  |  |

Using `+` to increase apricot-ness:

| `a man picking apricots+ from a tree` | `a man picking apricots++ from a tree` | `a man picking apricots+++ from a tree` | `a man picking apricots++++ from a tree` | `a man picking apricots+++++ from a tree` |
| -- | -- | -- | -- | -- |
|  |  |  |  |  |

You can also change the balance between different parts of a prompt. For example, below is a `mountain man`:



And here he is with more mountain:

| `mountain+ man` | `mountain++ man` | `mountain+++ man` |
| -- | -- | -- |
|  |  |  |

Or, alternatively, with more man:

| `mountain man+` | `mountain man++` | `mountain man+++` | `mountain man++++` |
| -- | -- | -- | -- |
|  |  |  |  |

### Blending between prompts

* `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,1)`
* The existing prompt blending using `:<weight>` will continue to be supported - `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,1)` is equivalent to `a tall thin man picking apricots:1 a tall thin man picking pears:1` in the old syntax.
* Attention weights can be nested inside blends.
* Non-normalized blends are supported by passing `no_normalize` as an additional argument to the blend weights, e.g. `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,-1,no_normalize)`. Very fun to explore local maxima in the feature space, but also easy to produce garbage output.

See the section below on "Prompt Blending" for more information about how this works.

### Cross-Attention Control ('prompt2prompt')

Sometimes an image you generate is almost right, and you just want to
change one detail without affecting the rest. You could use a photo editor and inpainting
to overpaint the area, but that's a pain. Here's where `prompt2prompt`
comes in handy.

Generate an image with a given prompt, record the seed of the image,
and then use the `prompt2prompt` syntax to substitute words in the
original prompt for words in a new prompt. This works for `img2img` as well.

* `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
* quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
* for single word substitutions parentheses are also optional: `a cat.swap(dog) eating a hotdog`.
* Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely corresponding to bloc97's `prompt_edit_spatial_start/_end` and `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to intuitively understand.
* Example usage: `a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end` argument means that the "spatial" (self-attention) edit will stop having any effect after 30% (=0.3) of the steps have been done, leaving Stable Diffusion with 70% of the steps where it is free to decide for itself how to reshape the cat-form into a dog form.
* The numbers represent a percentage through the step sequence where the edits should happen. 0 means the start (noisy starting image), 1 is the end (final image).
* For img2img, the step sequence does not start at 0 but instead at (1-strength) - so if strength is 0.7, s_start and s_end must both be greater than 0.3 (1-0.7) to have any effect.
* Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable Diffusion should have to change the shape of the subject being swapped.
* `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.

The `prompt2prompt` code is based on [bloc97's
colab](https://github.com/bloc97/CrossAttentionControl).

Note that `prompt2prompt` is not currently working with the runwayML
inpainting model, and may never work due to the way this model is set
up. If you attempt to use `prompt2prompt` you will get the original
image back. However, since this model is so good at inpainting, a
good substitute is to use the `clipseg` text masking option:

```
invoke> a fluffy cat eating a hotdog
Outputs:
[1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat
```

### Escaping parentheses () and speech marks ""

If the model you are using has parentheses () or speech marks "" as
part of its syntax, you will need to "escape" these using a backslash,
so that `(my_keyword)` becomes `\(my_keyword\)`. Otherwise, the prompt
parser will attempt to interpret the parentheses as part of the prompt
syntax and it will get confused.
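
For instance (an illustrative prompt, not one from the original examples):

```bash
invoke> a poster in the style of \(some_keyword\)
```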

## **Prompt Blending**

You may blend together different sections of the prompt to explore the

@@ -215,56 +112,56 @@ different results each time you run them.

---

<figure markdown>
### "blue sphere, red cube, hybrid"
</figure>

This example doesn't use melding at all and represents the default way
of mixing concepts.

<figure markdown>

</figure>

It's interesting to see how the AI expressed the concept of "cube" as
the four quadrants of the enclosing frame. If you look closely, there
is depth there, so the enclosing frame is actually a cube.

<figure markdown>
### "blue sphere:0.25 red cube:0.75 hybrid"


</figure>

Now that's interesting. We get neither a blue sphere nor a red cube,
but a red sphere embedded in a brick wall, which represents a melding
of concepts within the AI's "latent space" of semantic
representations. Where is Ludwig Wittgenstein when you need him?

<figure markdown>
### "blue sphere:0.75 red cube:0.25 hybrid"


</figure>

Definitely more blue-spherey. The cube is gone entirely, but it's
really cool abstract art.

<figure markdown>
### "blue sphere:0.5 red cube:0.5 hybrid"


</figure>

Whoa...! I see blue and red, but no spheres or cubes. Is the word
"hybrid" summoning up the concept of some sort of scifi creature?
Let's find out.

<figure markdown>
### "blue sphere:0.5 red cube:0.5"


</figure>

Indeed, removing the word "hybrid" produces an image that is more like
what we'd expect.

@@ -1,58 +0,0 @@
# **WebUI Hotkey List**

## General

| Hotkey       | Setting                |
| ------------ | ---------------------- |
| a            | Set All Parameters     |
| s            | Set Seed               |
| u            | Upscale                |
| r            | Restoration            |
| i            | Show Metadata          |
| Del          | Delete Image           |
| alt + a      | Focus prompt input     |
| shift + i    | Send To Image to Image |
| ctrl + enter | Start processing       |
| shift + x    | Cancel processing      |
| shift + d    | Toggle Dark Mode       |
| `            | Toggle console         |

## Tabs

| Hotkey  | Setting                   |
| ------- | ------------------------- |
| 1       | Go to Text To Image Tab   |
| 2       | Go to Image to Image Tab  |
| 3       | Go to Inpainting Tab      |
| 4       | Go to Outpainting Tab     |
| 5       | Go to Nodes Tab           |
| 6       | Go to Post Processing Tab |

## Gallery

| Hotkey       | Setting                         |
| ------------ | ------------------------------- |
| g            | Toggle Gallery                  |
| left arrow   | Go to previous image in gallery |
| right arrow  | Go to next image in gallery     |
| shift + p    | Pin gallery                     |
| shift + up   | Increase gallery image size     |
| shift + down | Decrease gallery image size     |
| shift + r    | Reset image gallery size        |

## Inpainting

| Hotkey                     | Setting               |
| -------------------------- | --------------------- |
| [                          | Decrease brush size   |
| ]                          | Increase brush size   |
| alt + [                    | Decrease mask opacity |
| alt + ]                    | Increase mask opacity |
| b                          | Select brush          |
| e                          | Select eraser         |
| ctrl + z                   | Undo brush stroke     |
| ctrl + shift + z, ctrl + y | Redo brush stroke     |
| h                          | Hide mask             |
| shift + m                  | Invert mask           |
| shift + c                  | Clear mask            |
| shift + j                  | Expand canvas         |

@@ -86,74 +86,57 @@ You will need one of the following:

- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

!!! info

    Precision is auto configured based on the device. If however you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:

    ```bash
    (invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
    ```

## :octicons-log-16: Latest Changes

### v2.0.1 <small>(13 October 2022)</small>

- fix noisy images at high step count when using k* samplers
- dream.py script now calls invoke.py module directly rather than
  via a new python process (which could break the environment)

### v2.0.0 <small>(9 October 2022)</small>

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
  for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
  and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
  during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>)
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
  and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
  New commands added:
  - List command-line history with `!history`
  - Search command-line history with `!search`
  - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto use the new flag like `--precision=float32`.

### v1.14 <small>(11 September 2022)</small>

- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
  ([prixt](https://github.com/prixt)).
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.

### v1.13 <small>(3 September 2022)</small>

- Support image variations (see [VARIATIONS](features/VARIATIONS.md))
  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
  [Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
  [Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
  [Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming stable-diffusion-v1.5)
  to be added without altering the code. ([David Wager](https://github.com/maddavid12))
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.

For older changelogs, please visit the **[CHANGELOG](CHANGELOG.md#v114-11-september-2022)**.

## :material-target: Troubleshooting

@@ -1,267 +0,0 @@
---
title: Installing Models
---

# :octicons-paintbrush-16: Installing Models

## Model Weight Files

The model weight files ('*.ckpt') are the Stable Diffusion "secret
sauce". They are the product of training the AI on millions of
captioned images gathered from multiple sources.

Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are dozens or more
that have been "fine tuned" to provide particular styles, genres, or
other features. InvokeAI allows you to install and run multiple model
weight files and switch between them quickly in the command-line and
web interfaces.

This manual will guide you through installing and configuring model
weight files.

## Base Models

InvokeAI comes with support for a good initial set of models listed in
the model configuration file `configs/models.yaml`. They are:

| Model                | Weight File                       | Description                                            | DOWNLOAD FROM                                                  |
| -------------------- | --------------------------------- | ------------------------------------------------------ | -------------------------------------------------------------- |
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt          | Most recent version of base Stable Diffusion model    | https://huggingface.co/runwayml/stable-diffusion-v1-5         |
| stable-diffusion-1.4 | sd-v1-4.ckpt                      | Previous version of base Stable Diffusion model        | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original |
| inpainting-1.5       | sd-v1-5-inpainting.ckpt           | Stable Diffusion 1.5 model specialized for inpainting  | https://huggingface.co/runwayml/stable-diffusion-inpainting   |
| waifu-diffusion-1.3  | model-epoch09-float32.ckpt        | Stable Diffusion 1.4 trained to produce anime images   | https://huggingface.co/hakurei/waifu-diffusion-v1-3           |
| <all models>         | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tune add-on file that improves face generation  | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/    |

Note that these files are covered by an "Ethical AI" license which
forbids certain uses. You will need to create an account on the
Hugging Face website and accept the license terms before you can
access the files.

The predefined configuration file for InvokeAI (located at
`configs/models.yaml`) provides entries for each of these weights
files. `stable-diffusion-1.5` is the default model used, and we
strongly recommend that you install this weights file if nothing else.

## Community-Contributed Models

There are too many to list here and more are being contributed every
day. Hugging Face maintains a [fast-growing
repository](https://huggingface.co/sd-concepts-library) of fine-tune
(".bin") models that can be imported into InvokeAI by passing the
`--embedding_path` option to the `invoke.py` command.

[This page](https://rentry.org/sdmodels) hosts a large list of
official and unofficial Stable Diffusion models and where they can be
obtained.

## Installation

There are three ways to install weights files:

1. During InvokeAI installation, the `preload_models.py` script can
   download them for you.

2. You can use the command-line interface (CLI) to import, configure
   and modify new model files.

3. You can download the files manually and add the appropriate entries
   to `models.yaml`.

### Installation via `preload_models.py`

This is the most automatic way. Run `scripts/preload_models.py` from
the console. It will ask you to select which models to download and
lead you through the steps of setting up a Hugging Face account if you
haven't done so already.

To start, from within the InvokeAI directory run the command `python
scripts/preload_models.py` (Linux/MacOS) or `python
scripts\preload_models.py` (Windows):

```
Loading Python libraries...

** INTRODUCTION **
Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
and other large models that are needed for text to image generation. At any point you may interrupt
this program and resume later.

** WEIGHT SELECTION **
Would you like to download the Stable Diffusion model weights now? [y]

Choose the weight file(s) you wish to download. Before downloading you
will be given the option to view and change your selections.

[1] stable-diffusion-1.5:
    The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
    Download? [y]
[2] inpainting-1.5:
    RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
    Download? [y]
[3] stable-diffusion-1.4:
    The original Stable Diffusion version 1.4 weight file (4.27 GB)
    Download? [n] n
[4] waifu-diffusion-1.3:
    Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
    Download? [n] y
[5] ft-mse-improved-autoencoder-840000:
    StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
    Download? [y] y
The following weight files will be downloaded:
   [1] stable-diffusion-1.5*
   [2] inpainting-1.5
   [4] waifu-diffusion-1.3
   [5] ft-mse-improved-autoencoder-840000
*default
Ok to download? [y]
** LICENSE AGREEMENT FOR WEIGHT FILES **

1. To download the Stable Diffusion weight files you need to read and accept the
   CreativeML Responsible AI license. If you have not already done so, please
   create an account using the "Sign Up" button:

   https://huggingface.co

   You will need to verify your email address as part of the HuggingFace
   registration process.

2. After creating the account, login under your account and accept
   the license terms located here:

   https://huggingface.co/CompVis/stable-diffusion-v-1-4-original

Press <enter> when you are ready to continue:
...
```

When the script is complete, you will find the downloaded weights
files in `models/ldm/stable-diffusion-v1` and a matching configuration
file in `configs/models.yaml`.

You can run the script again to add any models you didn't select the
first time. Note that as a safety measure the script will _never_
remove a previously-installed weights file. You will have to do this
manually.

### Installation via the CLI

You can install a new model, including any of the community-supported
ones, via the command-line client's `!import_model` command.

1. First download the desired model weights file and place it under `models/ldm/stable-diffusion-v1/`.
   You may rename the weights file to something more memorable if you wish. Record the path of the
   weights file (e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`)

2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.

3. At the `invoke>` command-line, enter the command `!import_model <path to model>`.
   For example:

   `invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`

   (Hint - the CLI supports file path autocompletion. Type a bit of the path
   name and hit <tab> in order to get a choice of possible completions.)

4. Follow the wizard's instructions to complete installation as shown in the example
   here:

```
invoke> <b>!import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:

Name for this model: <b>arabian-nights</b>
Description of this model: <b>Arabian Nights Fine Tune v1.0</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
arabian-nights:
  config: configs/stable-diffusion/v1-inference.yaml
  description: Arabian Nights Fine Tune v1.0
  height: 512
  weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
   | LatentDiffusion: Running in eps-prediction mode
   | DiffusionWrapper has 859.52 M params.
   | Making attention of type 'vanilla' with 512 in_channels
   | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
   | Making attention of type 'vanilla' with 512 in_channels
   | Using faster float16 precision
```

If you've previously installed the fine-tune VAE file `vae-ft-mse-840000-ema-pruned.ckpt`,
the wizard will also ask you if you want to add this VAE to the model.

The appropriate entry for this model will be added to `configs/models.yaml` and it will
be available to use in the CLI immediately.

The CLI has additional commands for switching among, viewing, editing, and
deleting the available models. These are described in [Command Line
Client](../features/CLI.md#model-selection-and-importation), but the two most
frequently-used are `!models` and `!switch <name of model>`. The first
prints a table of models that InvokeAI knows about and their load
status. The second will load the requested model and lets you switch
back and forth quickly among loaded models.

### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit
`models.yaml` directly.

First you need to download the desired .ckpt file and place it in
`models/ldm/stable-diffusion-v1` as described in step #1 in the
previous section. Record the path to the weights file,
e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`

Then using a **text** editor (e.g. the Windows Notepad application),
open the file `configs/models.yaml`, and add a new stanza that follows
this example:

```
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  default: false
```

* arabian-nights-1.0
  - This is the name of the model that you will refer to from within the
    CLI and the WebGUI when you need to load and use the model.

* description
  - Any description that you want to add to the model to remind you what
    it is.

* weights
  - Relative path to the .ckpt weights file for this model.

* config
  - This is the confusingly-named configuration file for the model itself.
    Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens
    to need a custom configuration, in which case the place you downloaded it
    from will tell you what to use instead. For example, the runwayML custom
    inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`.
    This is already included in the InvokeAI distribution and is configured automatically
    for you by the `preload_models.py` script.

* vae
  - If you want to add a VAE file to the model, then enter its path here.

* width, height
  - This is the width and height of the images used to train the model.
    Currently they are always 512 and 512.

Save the `models.yaml` and relaunch InvokeAI. The new model should now be
available for your use.
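
A quick way to confirm that the new entry was picked up is to list the models
the CLI knows about:

```bash
invoke> !models
```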
|
||||
|
||||
|
||||
@@ -36,6 +36,20 @@ another environment with NVIDIA GPUs on-premises or in the cloud.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
#### Get the data files
|
||||
|
||||
Go to
|
||||
[Hugging Face](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original),
|
||||
and click "Access repository" to Download the model file `sd-v1-4.ckpt` (~4 GB)
|
||||
to `~/Downloads`. You'll need to create an account but it's quick and free.
|
||||
|
||||
Also download the face restoration model.
|
||||
|
||||
```Shell
|
||||
cd ~/Downloads
|
||||
wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
|
||||
```
|
||||
|
||||
#### Install [Docker](https://github.com/santisbon/guides#docker)

On the Docker Desktop app, go to Preferences, Resources, Advanced. Increase the
@@ -43,61 +57,86 @@ CPUs and Memory to avoid this
[Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
increase Swap and Disk image size too.

#### Get a Huggingface-Token

Go to [Hugging Face](https://huggingface.co/settings/tokens), create a token and
temporarily place it somewhere like an open text editor window (but don't save
it! Only keep it open; we need it in the next step).

### Setup

Set the fork you want to use and other variables.

!!! tip

```Shell
TAG_STABLE_DIFFUSION="santisbon/stable-diffusion"
PLATFORM="linux/arm64"
GITHUB_STABLE_DIFFUSION="-b orig-gfpgan https://github.com/santisbon/stable-diffusion.git"
REQS_STABLE_DIFFUSION="requirements-linux-arm64.txt"
CONDA_SUBDIR="osx-arm64"
```

I prefer to save my env vars in the repository root in a `.env` (or
`.envrc`) file to automatically re-apply them when I come back.

The build- and run-scripts contain default values for almost everything,
besides the [Hugging Face Token](https://huggingface.co/settings/tokens) you
created in the last step.

Some suggestions of variables you may want to change besides the Token
(see the example `.env` file after the table):

| Environment-Variable | Description |
| -------------------------------------------------------------------- | --------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable; without it you can't get the checkpoint |
| `ARCH=aarch64` | if you are using an ARM-based CPU |
| `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used |
| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since `environment.yml` wouldn't work with aarch64 |
| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork |

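For reference, a `.env` file along these lines covers the required token
plus a couple of the optional overrides (all values here are
placeholders, not real ones):

```Shell
# .env in the repository root -- illustrative placeholder values only
HUGGINGFACE_TOKEN="hg_yourtokenhere"
ARCH=aarch64
INVOKEAI_TAG=yourname/invokeai:latest
```
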
#### Build the Image

I provided a build script, located in `docker-build/build.sh`, which still
needs to be executed from the Repository root.

```bash
docker-build/build.sh
echo $TAG_STABLE_DIFFUSION
echo $PLATFORM
echo $GITHUB_STABLE_DIFFUSION
echo $REQS_STABLE_DIFFUSION
echo $CONDA_SUBDIR
```

The build script not only builds the container, it also creates the docker
volume if it doesn't exist yet; if the volume is empty it will download the
models. When it is done you can run the container via the run script.
Create a Docker volume for the downloaded model files.

```bash
docker-build/run.sh
```

```Shell
docker volume create my-vol
```

When used without arguments, the container will start the website and provide
you the link to open it. But if you want to use some other parameters you can
also do so (see the sketch just below).
Copy the data files to the Docker volume using a lightweight Linux container.
We'll need the models at run time. You just need to create the container with
the mountpoint; no need to run this dummy container.

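A sketch of passing extra parameters, on the assumption that
`docker-build/run.sh` forwards its arguments to the container's
entrypoint; check the script itself before relying on this:

```bash
# Assumption: run.sh passes extra arguments through to the InvokeAI entrypoint
docker-build/run.sh --full_precision --web
```
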
!!! warning "Deprecated"

```Shell
cd ~/Downloads # or wherever you saved the files

From here on it is the rest of the previous Docker docs, which will still
provide useful information here and there.
docker create --platform $PLATFORM --name dummy --mount source=my-vol,target=/data alpine

docker cp sd-v1-4.ckpt dummy:/data
docker cp GFPGANv1.4.pth dummy:/data
```

Get the repo and download the Miniconda installer (we'll need it at build time).
Replace the URL with the version matching your container OS and the architecture
it will run on.

```Shell
cd ~
git clone $GITHUB_STABLE_DIFFUSION

cd stable-diffusion/docker-build
chmod +x entrypoint.sh
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-aarch64.sh -O anaconda.sh && chmod +x anaconda.sh
```

Build the Docker image. Give it any tag `-t` that you want.
Choose the Linux container's host platform: x86-64/Intel is `amd64`. Apple
silicon is `arm64`. If deploying the container to the cloud to leverage powerful
GPU instances you'll be on amd64 hardware, but if you're just trying this out
locally on Apple silicon, choose arm64.
The application uses libraries that need to match the host environment, so use
the appropriate requirements file.
Tip: Check that your shell session has the env variables set above.

```Shell
docker build -t $TAG_STABLE_DIFFUSION \
  --platform $PLATFORM \
  --build-arg gsd=$GITHUB_STABLE_DIFFUSION \
  --build-arg rsd=$REQS_STABLE_DIFFUSION \
  --build-arg cs=$CONDA_SUBDIR \
  .
```

Run a container using your built image.
Tip: Make sure you've created and populated the Docker volume (above).

```Shell
docker run -it \
  --rm \
  --platform $PLATFORM \
  --name stable-diffusion \
  --hostname stable-diffusion \
  --mount source=my-vol,target=/data \
  $TAG_STABLE_DIFFUSION
```

## Usage (time to have fun)

@@ -201,8 +240,7 @@ server with:
python3 scripts/invoke.py --full_precision --web
```

If it's running on your Mac point your Mac web browser to
<http://127.0.0.1:9090>
If it's running on your Mac point your Mac web browser to http://127.0.0.1:9090

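If the server is running on a different machine or inside a container,
it needs to listen on all interfaces rather than only on localhost. The
web script has historically accepted a host option, but confirm the
exact flag with `--help` before relying on it:

```Shell
python3 scripts/invoke.py --full_precision --web --host 0.0.0.0  # assumption: verify with --help
```
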
Press Control-C at the command line to stop the web server.


@@ -1,5 +1,5 @@
---
title: Manual Installation, Linux
title: Linux
---

# :fontawesome-brands-linux: Linux
@@ -63,16 +63,24 @@ title: Manual Installation, Linux
model loading scheme to allow the script to work on GPU machines that are not
internet connected. See [Preload Models](../features/OTHER.md#preload-models)

7. Install the weights for the stable diffusion model.
7. Now you need to install the weights for the stable diffusion model.

- Sign up at https://huggingface.co
- Go to the [Stable Diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
- Accept the terms and click Access Repository
- Download [v1-5-pruned-emaonly.ckpt (4.27 GB)](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt)
  and move it into this directory under `models/ldm/stable_diffusion_v1/v1-5-pruned-emaonly.ckpt`
- For running with the released weights, you will first need to set up an account
  with [Hugging Face](https://huggingface.co).
- Use your credentials to log in, and then point your browser [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original).
- You may be asked to sign a license agreement at this point.
- Click on "Files and versions" near the top of the page, and then click on the
  file named "sd-v1-4.ckpt". You'll be taken to a page that prompts you to click
  the "download" link. Save the file somewhere safe on your local machine.

There are many other models that you can use. Please see
[INSTALLING_MODELS.md](../features/INSTALLING_MODELS.md) for details.
Now run the following commands from within the stable-diffusion directory.
This will create a symbolic link from the stable-diffusion `model.ckpt` file to
the true location of the `sd-v1-4.ckpt` file.

```bash
(invokeai) ~/InvokeAI$ mkdir -p models/ldm/stable-diffusion-v1
(invokeai) ~/InvokeAI$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt
```

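You can then verify that the link resolves to the real checkpoint
(`-L` makes `ls` follow the symlink):

```bash
(invokeai) ~/InvokeAI$ ls -Llh models/ldm/stable-diffusion-v1/model.ckpt   # should report a ~4 GB file
```
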
8. Start generating images!


@@ -1,5 +1,5 @@
---
title: Manual Installation, macOS
title: macOS
---

# :fontawesome-brands-apple: macOS
@@ -24,15 +24,9 @@ First you need to download a large checkpoint file.
1. Sign up at https://huggingface.co
2. Go to the [Stable Diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
3. Accept the terms and click Access Repository
4. Download [v1-5-pruned-emaonly.ckpt (4.27 GB)](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt)
   and move it into this directory under `models/ldm/stable_diffusion_v1/v1-5-pruned-emaonly.ckpt`
4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder). You may want to move it somewhere else for longer-term storage - SD needs this file to run.

There are many other models that you can try. Please see
[INSTALLING_MODELS.md](../features/INSTALLING_MODELS.md) for details.

While that is downloading, open Terminal and run the following
commands one at a time, reading the comments and taking care to run
the appropriate command for your Mac's architecture (Intel or M1).
While that is downloading, open Terminal and run the following commands one at a time, reading the comments and taking care to run the appropriate command for your Mac's architecture (Intel or M1).

!!! todo "Homebrew"

@@ -238,7 +232,7 @@ There are several causes of these errors:
conda env remove -n invokeai
conda env create -f environment-mac.yml
```

4. If you have activated the invokeai virtual environment and tried rebuilding it,
   maybe the problem is that I have something installed that you don't and
   you'll just need to install it manually. Make sure you activate the virtual

@@ -1,5 +1,5 @@
---
title: Manual Installation, Windows
title: Windows
---

# :fontawesome-brands-windows: Windows
@@ -83,14 +83,23 @@ in the wiki

8. Now you need to install the weights for the big stable diffusion model.

- Sign up at https://huggingface.co
- Go to the [Stable Diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
- Accept the terms and click Access Repository
- Download [v1-5-pruned-emaonly.ckpt (4.27 GB)](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt)
  and move it into this directory under `models/ldm/stable_diffusion_v1/v1-5-pruned-emaonly.ckpt`
1. For running with the released weights, you will first need to set up an account with Hugging Face (https://huggingface.co).
2. Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.
3. You may be asked to sign a license agreement at this point.
4. Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that
   prompts you to click the "download" link. Now save the file somewhere safe on your local machine.
5. The weight file is >4 GB in size, so downloading may take a while.

There are many other models that you can use. Please see
[INSTALLING_MODELS.md](../features/INSTALLING_MODELS.md) for details.
Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place:

```batch
mkdir models\ldm\stable-diffusion-v1
copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt
```

Please replace `C:\path\to\sd-v1-4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file,
you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`.

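If you'd rather link than copy, `mklink` can create a symbolic link to the checkpoint instead; this is a sketch, and note that `mklink` normally requires an elevated Command Prompt (or Developer Mode):

```batch
rem Hypothetical link instead of a copy; adjust the source path to your download location
mklink models\ldm\stable-diffusion-v1\model.ckpt C:\path\to\sd-v1-4.ckpt
```
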
9. Start generating images!


@@ -1,44 +0,0 @@
name: invokeai
channels:
  - pytorch
  - conda-forge
dependencies:
  - python>=3.9
  - pip>=20.3
  - cudatoolkit
  - pytorch
  - torchvision
  - numpy=1.19
  - imageio=2.9.0
  - opencv=4.6.0
  - pillow=8.*
  - flask=2.1.*
  - flask_cors=3.0.10
  - flask-socketio=5.3.0
  - send2trash=1.8.0
  - eventlet
  - albumentations=0.4.3
  - pudb=2019.2
  - imageio-ffmpeg=0.4.2
  - pytorch-lightning=1.7.7
  - streamlit
  - einops=0.3.0
  - kornia=0.6
  - torchmetrics=0.7.0
  - transformers=4.21.3
  - torch-fidelity=0.3.0
  - tokenizers>=0.11.1,!=0.11.3,<0.13
  - pip:
    - omegaconf==2.1.1
    - realesrgan==0.2.5.0
    - test-tube>=0.7.5
    - pyreadline3
    - dependency_injector==4.40.0
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
    - -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
    - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1
@@ -3,12 +3,12 @@ channels:
  - pytorch
  - conda-forge
dependencies:
  - python=3.9.13
  - pip=22.2.2

  - pytorch=1.12.1
  - torchvision=0.13.1
  - python>=3.9, <3.10
  - pip>=22.2

  # pytorch left unpinned
  - pytorch
  - torchvision
  # I suggest keeping the other deps sorted for convenience.
  # To determine what the latest versions should be, run:
  #
@@ -16,49 +16,45 @@ dependencies:
  #   sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
  #   CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
  # ```

  - albumentations=1.2.1
  - coloredlogs=15.0.1
  - diffusers=0.6.0
  - einops=0.4.1
  - grpcio=1.46.4
  - humanfriendly=10.0
  - imageio=2.21.2
  - imageio-ffmpeg=0.4.7
  - imgaug=0.4.0
  - kornia=0.6.7
  - mpmath=1.2.1
  - nomkl # arm64 has only 1.0 while x64 needs 3.0
  - numpy=1.23.4
  - omegaconf=2.1.1
  - openh264=2.3.0
  - onnx=1.12.0
  - onnxruntime=1.12.1
  - pudb=2022.1
  - pytorch-lightning=1.7.7
  - scipy=1.9.3
  - streamlit=1.12.2
  - sympy=1.10.1
  - tensorboard=2.10.0
  - torchmetrics=0.10.1
  - py-opencv=4.6.0
  - flask=2.1.3
  - flask-socketio=5.3.0
  - flask-cors=3.0.10
  - eventlet=0.33.1
  - protobuf=3.20.1
  - send2trash=1.8.0
  - transformers=4.23.1
  - torch-fidelity=0.3.0
  - albumentations
  - coloredlogs
  - einops
  - grpcio
  - humanfriendly
  - imageio
  - imageio-ffmpeg
  - imgaug
  - kornia
  - mpmath
  - nomkl
  - numpy
  - omegaconf
  - openh264
  - onnx
  - onnxruntime
  - pudb
  - pytorch-lightning
  - scipy
  - streamlit
  - sympy
  - tensorboard
  - torchmetrics
  - pip:
    - flask==2.1.3
    - flask_socketio==5.3.0
    - flask_cors==3.0.10
    - dependency_injector==4.40.0
    - eventlet==0.33.1
    - protobuf==3.19.6
    - realesrgan==0.2.5.0
    - send2trash==1.8.0
    - test-tube==0.7.5
    - transformers==4.21.3
    - torch-fidelity==0.3.0
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
    - -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
    - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1

@@ -4,11 +4,11 @@ channels:
  - defaults
dependencies:
  - python>=3.9
  - pip=20.3
  - cudatoolkit=11.3
  - pytorch=1.11.0
  - torchvision=0.12.0
  - numpy=1.19.2
  - pip>=22.2
  - cudatoolkit
  - pytorch
  - torchvision
  - numpy
  - pip:
    - albumentations==0.4.3
    - opencv-python==4.5.5.64
@@ -26,7 +26,6 @@ dependencies:
    - pyreadline3
    - torch-fidelity==0.3.0
    - transformers==4.21.3
    - diffusers==0.6.0
    - torchmetrics==0.7.0
    - flask==2.1.3
    - flask_socketio==5.3.0
@@ -38,5 +37,4 @@ dependencies:
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
    - -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
    - -e .

1
frontend/dist/assets/index.352e4760.css
vendored
1
frontend/dist/assets/index.58175ea1.css
vendored
Normal file
517
frontend/dist/assets/index.64b87783.js
vendored
483
frontend/dist/assets/index.ea68b5f5.js
vendored
Normal file
6
frontend/dist/index.html
vendored
@@ -5,9 +5,9 @@
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>InvokeAI - A Stable Diffusion Toolkit</title>
    <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
    <script type="module" crossorigin src="./assets/index.64b87783.js"></script>
    <link rel="stylesheet" href="./assets/index.352e4760.css">
    <link rel="shortcut icon" type="icon" href="/assets/favicon.0d253ced.ico" />
    <script type="module" crossorigin src="/assets/index.ea68b5f5.js"></script>
    <link rel="stylesheet" href="/assets/index.58175ea1.css">
  </head>

  <body>

@@ -14,37 +14,28 @@
    "@chakra-ui/react": "^2.3.1",
    "@emotion/react": "^11.10.4",
    "@emotion/styled": "^11.10.4",
    "@radix-ui/react-context-menu": "^2.0.1",
    "@radix-ui/react-slider": "^1.1.0",
    "@radix-ui/react-tooltip": "^1.0.2",
    "@reduxjs/toolkit": "^1.8.5",
    "@types/uuid": "^8.3.4",
    "add": "^2.0.6",
    "dateformat": "^5.0.3",
    "framer-motion": "^7.2.1",
    "konva": "^8.3.13",
    "lodash": "^4.17.21",
    "re-resizable": "^6.9.9",
    "react": "^18.2.0",
    "react-colorful": "^5.6.1",
    "react-dom": "^18.2.0",
    "react-dropzone": "^14.2.2",
    "react-hotkeys-hook": "^3.4.7",
    "react-icons": "^4.4.0",
    "react-konva": "^18.2.3",
    "react-masonry-css": "^1.0.16",
    "react-redux": "^8.0.2",
    "react-transition-group": "^4.4.5",
    "redux-persist": "^6.0.0",
    "socket.io": "^4.5.2",
    "socket.io-client": "^4.5.2",
    "uuid": "^9.0.0",
    "yarn": "^1.22.19"
    "uuid": "^9.0.0"
  },
  "devDependencies": {
    "@types/dateformat": "^5.0.0",
    "@types/react": "^18.0.17",
    "@types/react-dom": "^18.0.6",
    "@types/react-transition-group": "^4.4.5",
    "@typescript-eslint/eslint-plugin": "^5.36.2",
    "@typescript-eslint/parser": "^5.36.2",
    "@vitejs/plugin-react": "^2.0.1",

@@ -2,20 +2,22 @@

.App {
  display: grid;
  width: 100vw;
  height: 100vh;
  background-color: var(--background-color);
  width: max-content;
}

.app-content {
  display: grid;
  row-gap: 0.5rem;
  padding: $app-padding;
  row-gap: 1rem;
  margin: 0.6rem;
  padding: 1rem;
  border-radius: 0.5rem;
  background-color: var(--background-color);
  grid-auto-rows: max-content;
  width: $app-width;
  height: $app-height;
  min-width: min-content;
}

.app-console {
  z-index: 20;
  z-index: 9999;
}

@@ -7,13 +7,11 @@ import { useAppDispatch } from './store';
import { requestSystemConfig } from './socketio/actions';
import { keepGUIAlive } from './utils';
import InvokeTabs from '../features/tabs/InvokeTabs';
import ImageUploader from '../common/components/ImageUploader';

keepGUIAlive();

const App = () => {
  const dispatch = useAppDispatch();

  const [isReady, setIsReady] = useState<boolean>(false);

  useEffect(() => {
@@ -23,16 +21,14 @@ const App = () => {

  return isReady ? (
    <div className="App">
      <ImageUploader>
        <ProgressBar />
        <div className="app-content">
          <SiteHeader />
          <InvokeTabs />
        </div>
        <div className="app-console">
          <Console />
        </div>
      </ImageUploader>
      <ProgressBar />
      <div className="app-content">
        <SiteHeader />
        <InvokeTabs />
      </div>
      <div className="app-console">
        <Console />
      </div>
    </div>
  ) : (
    <Loading />

@@ -32,8 +32,27 @@ export const UPSCALING_LEVELS: Array<{ key: string; value: number }> = [
  { key: '4x', value: 4 },
];

// Internal to human-readable parameters
export const PARAMETERS: { [key: string]: string } = {
  prompt: 'Prompt',
  iterations: 'Iterations',
  steps: 'Steps',
  cfgScale: 'CFG Scale',
  height: 'Height',
  width: 'Width',
  sampler: 'Sampler',
  seed: 'Seed',
  img2imgStrength: 'img2img Strength',
  gfpganStrength: 'GFPGAN Strength',
  upscalingLevel: 'Upscaling Level',
  upscalingStrength: 'Upscaling Strength',
  initialImagePath: 'Initial Image',
  maskPath: 'Initial Image Mask',
  shouldFitToWidthHeight: 'Fit Initial Image',
  seamless: 'Seamless Tiling',
  hiresFix: 'High Resolution Optimizations',
};

export const NUMPY_RAND_MIN = 0;

export const NUMPY_RAND_MAX = 4294967295;

export const FACETOOL_TYPES = ['gfpgan', 'codeformer'] as const;

@@ -1,8 +0,0 @@
import { createContext } from 'react';

type VoidFunc = () => void;

type ImageUploaderTriggerContextType = VoidFunc | null;

export const ImageUploaderTriggerContext =
  createContext<ImageUploaderTriggerContextType>(null);
@@ -15,8 +15,8 @@ export enum Feature {
  IMAGE_TO_IMAGE,
}
/** For each tooltip in the UI, the below feature definitions & props will pull relevant information into the tooltip.
 *
 * To-do: href & GuideImages are placeholders, and are not currently utilized, but will be updated (along with the tooltip UI) as feature and UI development and we get a better idea on where things "forever homes" will be .
 *
 * To-do: href & GuideImages are placeholders, and are not currently utilized, but will be updated (along with the tooltip UI) as feature and UI development and we get a better idea on where things "forever homes" will be .
 */
export const FEATURES: Record<Feature, FeatureHelpInfo> = {
  [Feature.PROMPT]: {
@@ -30,8 +30,7 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
    guideImage: 'asset/path.gif',
  },
  [Feature.OTHER]: {
    text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ',
    href: 'link/to/docs/feature3.html',
    text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ', href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
  [Feature.SEED]: {
@@ -50,7 +49,7 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
    guideImage: 'asset/path.gif',
  },
  [Feature.FACE_CORRECTION]: {
    text: 'Using GFPGAN or Codeformer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher strength values will apply a stronger corrective pressure on outputs, resulting in more appealing faces. With Codeformer, a higher fidelity will attempt to preserve the original image, at the expense of face correction strength.',
    text: 'Using GFPGAN, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs, resulting in more appealing faces (with less respect for accuracy of the original subject).',
    href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },

50
frontend/src/app/invokeai.d.ts
vendored
@@ -12,8 +12,6 @@
 * 'gfpgan'.
 */

import { Category as GalleryCategory } from '../features/gallery/gallerySlice';

/**
 * TODO:
 * Once an image has been generated, if it is postprocessed again,
@@ -91,31 +89,27 @@ export declare type ESRGANMetadata = CommonPostProcessedImageMetadata & {
  strength: number;
};

export declare type FacetoolMetadata = CommonPostProcessedImageMetadata & {
  type: 'gfpgan' | 'codeformer';
export declare type GFPGANMetadata = CommonPostProcessedImageMetadata & {
  type: 'gfpgan';
  strength: number;
  fidelity?: number;
};

// Superset of all postprocessed image metadata types..
export declare type PostProcessedImageMetadata =
  | ESRGANMetadata
  | FacetoolMetadata;
  | GFPGANMetadata;

// Metadata includes the system config and image metadata.
export declare type Metadata = SystemConfig & {
  image: GeneratedImageMetadata | PostProcessedImageMetadata;
};

// An Image has a UUID, url, modified timestamp, width, height and maybe metadata
// An Image has a UUID, url (path?) and Metadata.
export declare type Image = {
  uuid: string;
  url: string;
  mtime: number;
  metadata?: Metadata;
  width: number;
  height: number;
  category: GalleryCategory;
  metadata: Metadata;
};

// GalleryImages is an array of Image.
@@ -145,35 +139,20 @@ export declare type SystemConfig = {
  model_hash: string;
  app_id: string;
  app_version: string;
  model_list: ModelList;
};

export declare type ModelStatus = 'active' | 'cached' | 'not loaded';

export declare type Model = {
  status: ModelStatus;
  description: string;
};

export declare type ModelList = Record<string, Model>;

/**
 * These types type data received from the server via socketio.
 */

export declare type ModelChangeResponse = {
  model_name: string;
  model_list: ModelList;
};

export declare type SystemStatusResponse = SystemStatus;

export declare type SystemConfigResponse = SystemConfig;

export declare type ImageResultResponse = Omit<Image, 'uuid'>;

export declare type ImageUploadResponse = Omit<Image, 'uuid' | 'metadata'> & {
  destination: 'img2img' | 'inpainting';
export declare type ImageResultResponse = {
  url: string;
  mtime: number;
  metadata: Metadata;
};

export declare type ErrorResponse = {
@@ -184,22 +163,13 @@ export declare type ErrorResponse = {
export declare type GalleryImagesResponse = {
  images: Array<Omit<Image, 'uuid'>>;
  areMoreImagesAvailable: boolean;
  category: GalleryCategory;
};

export declare type ImageDeletedResponse = {
export declare type ImageUrlAndUuidResponse = {
  uuid: string;
  url: string;
  category: GalleryCategory;
};

export declare type ImageUrlResponse = {
  url: string;
};

export declare type ImageUploadDestination = 'img2img' | 'inpainting';

export declare type UploadImagePayload = {
  file: File;
  destination?: ImageUploadDestination;
};

@@ -1,9 +1,6 @@
import { createAction } from '@reduxjs/toolkit';
import { GalleryCategory } from '../../features/gallery/gallerySlice';
import { InvokeTabName } from '../../features/tabs/InvokeTabs';
import * as InvokeAI from '../invokeai';


/**
 * We can't use redux-toolkit's createSlice() to make these actions,
 * because they have no associated reducer. They only exist to dispatch
@@ -11,28 +8,24 @@ import * as InvokeAI from '../invokeai';
 * by the middleware.
 */

export const generateImage = createAction<InvokeTabName>(
  'socketio/generateImage'
);
export const generateImage = createAction<undefined>('socketio/generateImage');
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN');
export const runFacetool = createAction<InvokeAI.Image>('socketio/runFacetool');
export const runGFPGAN = createAction<InvokeAI.Image>('socketio/runGFPGAN');
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage');
export const requestImages = createAction<GalleryCategory>(
export const requestImages = createAction<undefined>(
  'socketio/requestImages'
);
export const requestNewImages = createAction<GalleryCategory>(
export const requestNewImages = createAction<undefined>(
  'socketio/requestNewImages'
);
export const cancelProcessing = createAction<undefined>(
  'socketio/cancelProcessing'
);
export const uploadImage = createAction<InvokeAI.UploadImagePayload>('socketio/uploadImage');
export const uploadInitialImage = createAction<File>(
  'socketio/uploadInitialImage'
);
export const uploadMaskImage = createAction<File>('socketio/uploadMaskImage');

export const requestSystemConfig = createAction<undefined>(
  'socketio/requestSystemConfig'
);

export const requestModelChange = createAction<string>(
  'socketio/requestModelChange'
);

@@ -1,26 +1,13 @@
import { AnyAction, Dispatch, MiddlewareAPI } from '@reduxjs/toolkit';
import dateFormat from 'dateformat';
import { Socket } from 'socket.io-client';
import {
  frontendToBackendParameters,
  FrontendToBackendParametersConfig,
} from '../../common/util/parameterTranslation';
import {
  GalleryCategory,
  GalleryState,
} from '../../features/gallery/gallerySlice';
import { OptionsState } from '../../features/options/optionsSlice';
import { frontendToBackendParameters } from '../../common/util/parameterTranslation';
import {
  addLogEntry,
  errorOccurred,
  setCurrentStatus,
  setIsCancelable,
  setIsProcessing,
} from '../../features/system/systemSlice';
import { inpaintingImageElementRef } from '../../features/tabs/Inpainting/InpaintingCanvas';
import { InvokeTabName } from '../../features/tabs/InvokeTabs';
import { tabMap, tab_dict } from '../../features/tabs/InvokeTabs';
import * as InvokeAI from '../invokeai';
import { RootState } from '../store';

/**
 * Returns an object containing all functions which use `socketio.emit()`.
@@ -34,87 +21,39 @@ const makeSocketIOEmitters = (
  const { dispatch, getState } = store;

  return {
    emitGenerateImage: (generationMode: InvokeTabName) => {
    emitGenerateImage: () => {
      dispatch(setIsProcessing(true));

      const state: RootState = getState();
      const options = { ...getState().options };

      const {
        options: optionsState,
        system: systemState,
        inpainting: inpaintingState,
        gallery: galleryState,
      } = state;

      const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =
        {
          generationMode,
          optionsState,
          inpaintingState,
          systemState,
        };

      if (generationMode === 'inpainting') {
        if (
          !inpaintingImageElementRef.current ||
          !inpaintingState.imageToInpaint?.url
        ) {
          dispatch(
            addLogEntry({
              timestamp: dateFormat(new Date(), 'isoDateTime'),
              message: 'Inpainting image not loaded, cannot generate image.',
              level: 'error',
            })
          );
          dispatch(errorOccurred());
          return;
        }

        frontendToBackendParametersConfig.imageToProcessUrl =
          inpaintingState.imageToInpaint.url;

        frontendToBackendParametersConfig.maskImageElement =
          inpaintingImageElementRef.current;
      } else if (!['txt2img', 'img2img'].includes(generationMode)) {
        if (!galleryState.currentImage?.url) return;

        frontendToBackendParametersConfig.imageToProcessUrl =
          galleryState.currentImage.url;
      if (tabMap[options.activeTab] === 'txt2img') {
        options.shouldUseInitImage = false;
      }

      const { generationParameters, esrganParameters, facetoolParameters } =
        frontendToBackendParameters(frontendToBackendParametersConfig);
      const { generationParameters, esrganParameters, gfpganParameters } =
        frontendToBackendParameters(options, getState().system);

      socketio.emit(
        'generateImage',
        generationParameters,
        esrganParameters,
        facetoolParameters
        gfpganParameters
      );

      // we need to truncate the init_mask base64 else it takes up the whole log
      // TODO: handle maintaining masks for reproducibility in future
      if (generationParameters.init_mask) {
        generationParameters.init_mask = generationParameters.init_mask
          .substr(0, 20)
          .concat('...');
      }

      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
          message: `Image generation requested: ${JSON.stringify({
            ...generationParameters,
            ...esrganParameters,
            ...facetoolParameters,
            ...gfpganParameters,
          })}`,
        })
      );
    },
    emitRunESRGAN: (imageToProcess: InvokeAI.Image) => {
      dispatch(setIsProcessing(true));
      const options: OptionsState = getState().options;
      const { upscalingLevel, upscalingStrength } = options;
      const { upscalingLevel, upscalingStrength } = getState().options;
      const esrganParameters = {
        upscale: [upscalingLevel, upscalingStrength],
      };
@@ -132,55 +71,44 @@ const makeSocketIOEmitters = (
        })
      );
    },
    emitRunFacetool: (imageToProcess: InvokeAI.Image) => {
    emitRunGFPGAN: (imageToProcess: InvokeAI.Image) => {
      dispatch(setIsProcessing(true));
      const options: OptionsState = getState().options;
      const { facetoolType, facetoolStrength, codeformerFidelity } = options;
      const { gfpganStrength } = getState().options;

      const facetoolParameters: Record<string, any> = {
        facetool_strength: facetoolStrength,
      const gfpganParameters = {
        facetool_strength: gfpganStrength,
      };

      if (facetoolType === 'codeformer') {
        facetoolParameters.codeformer_fidelity = codeformerFidelity;
      }

      socketio.emit('runPostprocessing', imageToProcess, {
        type: facetoolType,
        ...facetoolParameters,
        type: 'gfpgan',
        ...gfpganParameters,
      });
      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
          message: `Face restoration (${facetoolType}) requested: ${JSON.stringify(
            {
              file: imageToProcess.url,
              ...facetoolParameters,
            }
          )}`,
          message: `GFPGAN fix faces requested: ${JSON.stringify({
            file: imageToProcess.url,
            ...gfpganParameters,
          })}`,
        })
      );
    },
    emitDeleteImage: (imageToDelete: InvokeAI.Image) => {
      const { url, uuid, category } = imageToDelete;
      socketio.emit('deleteImage', url, uuid, category);
      const { url, uuid } = imageToDelete;
      socketio.emit('deleteImage', url, uuid);
    },
    emitRequestImages: (category: GalleryCategory) => {
      const gallery: GalleryState = getState().gallery;
      const { earliest_mtime } = gallery.categories[category];
      socketio.emit('requestImages', category, earliest_mtime);
    emitRequestImages: () => {
      const { earliest_mtime } = getState().gallery;
      socketio.emit('requestImages', earliest_mtime);
    },
    emitRequestNewImages: (category: GalleryCategory) => {
      const gallery: GalleryState = getState().gallery;
      const { latest_mtime } = gallery.categories[category];
      socketio.emit('requestLatestImages', category, latest_mtime);
    emitRequestNewImages: () => {
      const { latest_mtime } = getState().gallery;
      socketio.emit('requestLatestImages', latest_mtime);
    },
    emitCancelProcessing: () => {
      socketio.emit('cancel');
    },
    emitUploadImage: (payload: InvokeAI.UploadImagePayload) => {
      const { file, destination } = payload;
      socketio.emit('uploadImage', file, file.name, destination);
    emitUploadInitialImage: (file: File) => {
      socketio.emit('uploadInitialImage', file, file.name);
    },
    emitUploadMaskImage: (file: File) => {
      socketio.emit('uploadMaskImage', file, file.name);
@@ -188,12 +116,6 @@ const makeSocketIOEmitters = (
    emitRequestSystemConfig: () => {
      socketio.emit('requestSystemConfig');
    },
    emitRequestModelChange: (modelName: string) => {
      dispatch(setCurrentStatus('Changing Model'));
      dispatch(setIsProcessing(true));
      dispatch(setIsCancelable(false));
      socketio.emit('requestModelChange', modelName);
    },
  };
};


@@ -13,27 +13,21 @@ import {
  setSystemConfig,
  processingCanceled,
  errorOccurred,
  setModelList,
  setIsCancelable,
} from '../../features/system/systemSlice';

import {
  addGalleryImages,
  addImage,
  clearIntermediateImage,
  GalleryState,
  removeImage,
  setCurrentImage,
  setIntermediateImage,
} from '../../features/gallery/gallerySlice';

import {
  clearInitialImage,
  setInitialImage,
  setInitialImagePath,
  setMaskPath,
} from '../../features/options/optionsSlice';
import { requestImages, requestNewImages } from './actions';
import { clearImageToInpaint, setImageToInpaint } from '../../features/tabs/Inpainting/inpaintingSlice';

/**
 * Returns an object containing listener callbacks for socketio events.
@@ -52,18 +46,10 @@ const makeSocketIOListeners = (
      try {
        dispatch(setIsConnected(true));
        dispatch(setCurrentStatus('Connected'));
        const gallery: GalleryState = getState().gallery;

        if (gallery.categories.user.latest_mtime) {
          dispatch(requestNewImages('user'));
        if (getState().gallery.latest_mtime) {
          dispatch(requestNewImages());
        } else {
          dispatch(requestImages('user'));
        }

        if (gallery.categories.result.latest_mtime) {
          dispatch(requestNewImages('result'));
        } else {
          dispatch(requestImages('result'));
          dispatch(requestImages());
        }
      } catch (e) {
        console.error(e);
@@ -93,19 +79,21 @@ const makeSocketIOListeners = (
     */
    onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, mtime, metadata } = data;
        const newUuid = uuidv4();

        dispatch(
          addImage({
            category: 'result',
            image: {
              uuid: uuidv4(),
              ...data,
            },
            uuid: newUuid,
            url,
            mtime,
            metadata: metadata,
          })
        );
        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Image generated: ${data.url}`,
            message: `Image generated: ${url}`,
          })
        );
      } catch (e) {
@@ -117,16 +105,20 @@ const makeSocketIOListeners = (
     */
    onIntermediateResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const uuid = uuidv4();
        const { url, metadata, mtime } = data;
        dispatch(
          setIntermediateImage({
            uuid: uuidv4(),
            ...data,
            uuid,
            url,
            mtime,
            metadata,
          })
        );
        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Intermediate image generated: ${data.url}`,
            message: `Intermediate image generated: ${url}`,
          })
        );
      } catch (e) {
@@ -138,20 +130,47 @@ const makeSocketIOListeners = (
     */
    onPostprocessingResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, metadata, mtime } = data;

        dispatch(
          addImage({
            category: 'result',
            image: {
              uuid: uuidv4(),
              ...data,
            },
            uuid: uuidv4(),
            url,
            mtime,
            metadata,
          })
        );

        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Postprocessed: ${data.url}`,
            message: `Postprocessed: ${url}`,
          })
        );
      } catch (e) {
        console.error(e);
      }
    },
    /**
     * Callback to run when we receive a 'gfpganResult' event.
     */
    onGFPGANResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, metadata, mtime } = data;

        dispatch(
          addImage({
            uuid: uuidv4(),
            url,
            mtime,
            metadata,
          })
        );

        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Fixed faces: ${url}`,
          })
        );
      } catch (e) {
@@ -198,7 +217,7 @@ const makeSocketIOListeners = (
     * Callback to run when we receive a 'galleryImages' event.
     */
    onGalleryImages: (data: InvokeAI.GalleryImagesResponse) => {
      const { images, areMoreImagesAvailable, category } = data;
      const { images, areMoreImagesAvailable } = data;

      /**
       * the logic here ideally would be in the reducer but we have a side effect:
@@ -207,18 +226,17 @@ const makeSocketIOListeners = (

      // Generate a UUID for each image
      const preparedImages = images.map((image): InvokeAI.Image => {
        const { url, metadata, mtime } = image;
        return {
          uuid: uuidv4(),
          ...image,
          url,
          mtime,
          metadata,
        };
      });

      dispatch(
        addGalleryImages({
          images: preparedImages,
          areMoreImagesAvailable,
          category,
        })
        addGalleryImages({ images: preparedImages, areMoreImagesAvailable })
      );

      dispatch(
@@ -237,12 +255,7 @@ const makeSocketIOListeners = (
      const { intermediateImage } = getState().gallery;

      if (intermediateImage) {
        dispatch(
          addImage({
            category: 'result',
            image: intermediateImage,
          })
        );
        dispatch(addImage(intermediateImage));
        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
@@ -263,22 +276,14 @@ const makeSocketIOListeners = (
    /**
     * Callback to run when we receive a 'imageDeleted' event.
     */
    onImageDeleted: (data: InvokeAI.ImageDeletedResponse) => {
      const { url, uuid, category } = data;
    onImageDeleted: (data: InvokeAI.ImageUrlAndUuidResponse) => {
      const { url, uuid } = data;
      dispatch(removeImage(uuid));

      // remove image from gallery
      dispatch(removeImage(data));
      const { initialImagePath, maskPath } = getState().options;

      // remove references to image in options
      const { initialImage, maskPath } = getState().options;
      const { imageToInpaint } = getState().inpainting;

      if (initialImage?.url === url || initialImage === url) {
        dispatch(clearInitialImage());
      }

      if (imageToInpaint?.url === url) {
        dispatch(clearImageToInpaint());
      if (initialImagePath === url) {
        dispatch(setInitialImagePath(''));
      }

      if (maskPath === url) {
@@ -292,40 +297,18 @@ const makeSocketIOListeners = (
        })
      );
    },
    onImageUploaded: (data: InvokeAI.ImageUploadResponse) => {
      const { destination, ...rest } = data;
      const image = {
        uuid: uuidv4(),
        ...rest,
      };

      try {
        dispatch(addImage({ image, category: 'user' }));

        switch (destination) {
          case 'img2img': {
            dispatch(setInitialImage(image));
            break;
          }
          case 'inpainting': {
            dispatch(setImageToInpaint(image));
            break;
          }
          default: {
            dispatch(setCurrentImage(image));
            break;
          }
        }

        dispatch(
          addLogEntry({
            timestamp: dateFormat(new Date(), 'isoDateTime'),
            message: `Image uploaded: ${data.url}`,
          })
        );
      } catch (e) {
        console.error(e);
      }
    /**
     * Callback to run when we receive a 'initialImageUploaded' event.
     */
    onInitialImageUploaded: (data: InvokeAI.ImageUrlResponse) => {
      const { url } = data;
      dispatch(setInitialImagePath(url));
      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
          message: `Initial image uploaded: ${url}`,
        })
      );
    },
    /**
     * Callback to run when we receive a 'maskImageUploaded' event.
@@ -343,34 +326,6 @@ const makeSocketIOListeners = (
    onSystemConfig: (data: InvokeAI.SystemConfig) => {
      dispatch(setSystemConfig(data));
    },
    onModelChanged: (data: InvokeAI.ModelChangeResponse) => {
      const { model_name, model_list } = data;
      dispatch(setModelList(model_list));
      dispatch(setCurrentStatus('Model Changed'));
      dispatch(setIsProcessing(false));
      dispatch(setIsCancelable(false));
      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
          message: `Model changed: ${model_name}`,
          level: 'info',
        })
      );
    },
    onModelChangeFailed: (data: InvokeAI.ModelChangeResponse) => {
      const { model_name, model_list } = data;
      dispatch(setModelList(model_list));
      dispatch(setIsProcessing(false));
      dispatch(setIsCancelable(false));
      dispatch(errorOccurred());
      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
          message: `Model change failed: ${model_name}`,
          level: 'error',
        })
      );
    },
  };
};


@@ -22,11 +22,10 @@ import * as InvokeAI from '../invokeai';
 * some new action to handle whatever data was sent from the server.
 */
export const socketioMiddleware = () => {
  const { origin } = new URL(window.location.href);
  const { hostname, port } = new URL(window.location.href);

  const socketio = io(origin, {
  const socketio = io(`http://${hostname}:${port}`, {
    timeout: 60000,
    path: window.location.pathname + 'socket.io',
  });

  let areListenersSet = false;
@@ -43,25 +42,22 @@ export const socketioMiddleware = () => {
    onGalleryImages,
    onProcessingCanceled,
    onImageDeleted,
    onImageUploaded,
    onInitialImageUploaded,
    onMaskImageUploaded,
    onSystemConfig,
    onModelChanged,
    onModelChangeFailed,
  } = makeSocketIOListeners(store);

  const {
    emitGenerateImage,
    emitRunESRGAN,
    emitRunFacetool,
    emitRunGFPGAN,
    emitDeleteImage,
    emitRequestImages,
    emitRequestNewImages,
    emitCancelProcessing,
    emitUploadImage,
    emitUploadInitialImage,
    emitUploadMaskImage,
    emitRequestSystemConfig,
    emitRequestModelChange,
  } = makeSocketIOEmitters(store, socketio);

  /**
@@ -100,16 +96,13 @@ export const socketioMiddleware = () => {
      onProcessingCanceled();
    });

    socketio.on('imageDeleted', (data: InvokeAI.ImageDeletedResponse) => {
    socketio.on('imageDeleted', (data: InvokeAI.ImageUrlAndUuidResponse) => {
      onImageDeleted(data);
    });

    socketio.on(
      'imageUploaded',
      (data: InvokeAI.ImageUploadResponse) => {
        onImageUploaded(data);
      }
    );
    socketio.on('initialImageUploaded', (data: InvokeAI.ImageUrlResponse) => {
      onInitialImageUploaded(data);
    });

    socketio.on('maskImageUploaded', (data: InvokeAI.ImageUrlResponse) => {
      onMaskImageUploaded(data);
@@ -119,14 +112,6 @@ export const socketioMiddleware = () => {
      onSystemConfig(data);
    });

    socketio.on('modelChanged', (data: InvokeAI.ModelChangeResponse) => {
      onModelChanged(data);
    });

    socketio.on('modelChangeFailed', (data: InvokeAI.ModelChangeResponse) => {
      onModelChangeFailed(data);
    });

    areListenersSet = true;
  }

@@ -135,7 +120,7 @@ export const socketioMiddleware = () => {
   */
  switch (action.type) {
    case 'socketio/generateImage': {
      emitGenerateImage(action.payload);
      emitGenerateImage();
      break;
    }

@@ -144,8 +129,8 @@ export const socketioMiddleware = () => {
      break;
    }

    case 'socketio/runFacetool': {
      emitRunFacetool(action.payload);
    case 'socketio/runGFPGAN': {
      emitRunGFPGAN(action.payload);
      break;
    }

@@ -155,12 +140,12 @@ export const socketioMiddleware = () => {
    }

    case 'socketio/requestImages': {
      emitRequestImages(action.payload);
      emitRequestImages();
      break;
    }

    case 'socketio/requestNewImages': {
      emitRequestNewImages(action.payload);
      emitRequestNewImages();
      break;
    }

@@ -169,8 +154,8 @@ export const socketioMiddleware = () => {
      break;
    }

    case 'socketio/uploadImage': {
      emitUploadImage(action.payload);
    case 'socketio/uploadInitialImage': {
      emitUploadInitialImage(action.payload);
      break;
    }

@@ -183,11 +168,6 @@ export const socketioMiddleware = () => {
      emitRequestSystemConfig();
      break;
    }

    case 'socketio/requestModelChange': {
      emitRequestModelChange(action.payload);
      break;
    }
  }

  next(action);

@@ -7,7 +7,6 @@ import storage from 'redux-persist/lib/storage'; // defaults to localStorage for

import optionsReducer from '../features/options/optionsSlice';
import galleryReducer from '../features/gallery/gallerySlice';
import inpaintingReducer from '../features/tabs/Inpainting/inpaintingSlice';

import systemReducer from '../features/system/systemSlice';
import { socketioMiddleware } from './socketio/middleware';
@@ -33,14 +32,13 @@ import { socketioMiddleware } from './socketio/middleware';
const rootPersistConfig = {
  key: 'root',
  storage,
  blacklist: ['gallery', 'system', 'inpainting'],
  blacklist: ['gallery', 'system'],
};

const systemPersistConfig = {
  key: 'system',
  storage,
  blacklist: [
    'isCancelable',
    'isConnected',
    'isProcessing',
    'currentStep',
@@ -55,30 +53,10 @@ const systemPersistConfig = {
  ],
};

const galleryPersistConfig = {
  key: 'gallery',
  storage,
  whitelist: [
    'galleryWidth',
    'shouldPinGallery',
    'shouldShowGallery',
    'galleryScrollPosition',
    'galleryImageMinimumWidth',
    'galleryImageObjectFit',
  ],
};

const inpaintingPersistConfig = {
  key: 'inpainting',
  storage,
  blacklist: ['pastLines', 'futuresLines', 'cursorPosition'],
};

const reducers = combineReducers({
  options: optionsReducer,
  gallery: persistReducer(galleryPersistConfig, galleryReducer),
  gallery: galleryReducer,
  system: persistReducer(systemPersistConfig, systemReducer),
  inpainting: persistReducer(inpaintingPersistConfig, inpaintingReducer),
});

const persistedReducer = persistReducer(rootPersistConfig, reducers);

@@ -25,10 +25,7 @@ const systemSelector = createSelector(
const GuidePopover = ({ children, feature }: GuideProps) => {
  const shouldDisplayGuides = useAppSelector(systemSelector);
  const { text } = FEATURES[feature];

  if (!shouldDisplayGuides) return null;

  return (
  return shouldDisplayGuides ? (
    <Popover trigger={'hover'}>
      <PopoverTrigger>
        <Box>{children}</Box>
@@ -43,6 +40,8 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
        <div className="guide-popover-guide-content">{text}</div>
      </PopoverContent>
    </Popover>
  ) : (
    <></>
  );
};


@@ -1,26 +0,0 @@
.invokeai__checkbox {
  .chakra-checkbox__label {
    margin-top: 1px;
    color: var(--text-color-secondary);
    font-size: 0.9rem;
  }

  .chakra-checkbox__control {
    width: 1rem;
    height: 1rem;
    border: none;
    border-radius: 0.2rem;
    background-color: var(--input-checkbox-bg);

    svg {
      width: 0.6rem;
      height: 0.6rem;
      stroke-width: 3px !important;
    }

    &[data-checked] {
      color: var(--text-color);
      background-color: var(--input-checkbox-checked-bg);
    }
  }
}
@@ -1,17 +0,0 @@
import { Checkbox, CheckboxProps } from '@chakra-ui/react';

type IAICheckboxProps = CheckboxProps & {
  label: string;
  styleClass?: string;
};

const IAICheckbox = (props: IAICheckboxProps) => {
  const { label, styleClass, ...rest } = props;
  return (
    <Checkbox className={`invokeai__checkbox ${styleClass}`} {...rest}>
      {label}
    </Checkbox>
  );
};

export default IAICheckbox;
@@ -1,8 +0,0 @@
.invokeai__color-picker {
  .react-colorful__hue-pointer,
  .react-colorful__saturation-pointer {
    width: 1.5rem;
    height: 1.5rem;
    border-color: var(--white);
  }
}
@@ -1,19 +0,0 @@
import { RgbaColorPicker } from 'react-colorful';
import { ColorPickerBaseProps, RgbaColor } from 'react-colorful/dist/types';

type IAIColorPickerProps = ColorPickerBaseProps<RgbaColor> & {
  styleClass?: string;
};

const IAIColorPicker = (props: IAIColorPickerProps) => {
  const { styleClass, ...rest } = props;

  return (
    <RgbaColorPicker
      className={`invokeai__color-picker ${styleClass}`}
      {...rest}
    />
  );
};

export default IAIColorPicker;
@@ -1,20 +0,0 @@
@use '../../styles/Mixins/' as *;

.icon-button {
  background-color: var(--btn-grey);
  cursor: pointer;

  &:hover {
    background-color: var(--btn-grey-hover);
  }

  &[data-selected=true] {
    background-color: var(--accent-color);
    &:hover {
      background-color: var(--accent-color-hover);
    }
  }
  &[disabled] {
    cursor: not-allowed;
  }
}
@@ -8,28 +8,20 @@ import {
interface Props extends IconButtonProps {
  tooltip?: string;
  tooltipPlacement?: PlacementWithLogical | undefined;
  styleClass?: string;
}

/**
 * Reusable customized button component. Originally was more customized - now probably unnecessary.
 *
 * TODO: Get rid of this.
 */
const IAIIconButton = (props: Props) => {
  const {
    tooltip = '',
    tooltipPlacement = 'bottom',
    styleClass,
    onClick,
    cursor,
    ...rest
  } = props;

  const { tooltip = '', tooltipPlacement = 'bottom', onClick, ...rest } = props;
  return (
    <Tooltip label={tooltip} hasArrow placement={tooltipPlacement}>
      <IconButton
        className={`icon-button ${styleClass}`}
        {...rest}
        cursor={cursor ? cursor : onClick ? 'pointer' : 'unset'}
        cursor={onClick ? 'pointer' : 'unset'}
        onClick={onClick}
      />
    </Tooltip>
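For reference, a hypothetical usage of IAIIconButton; Chakra's IconButtonProps requires `aria-label`, and the icon import and handler below are illustrative:

import { FaTrash } from 'react-icons/fa';
import IAIIconButton from './IAIIconButton'; // import path is illustrative

const DeleteImageButton = ({ onDelete }: { onDelete: () => void }) => (
  <IAIIconButton
    aria-label="Delete image"
    tooltip="Delete"
    icon={<FaTrash />}
    onClick={onDelete}
  />
);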
@@ -17,8 +17,8 @@

  &:focus {
    outline: none;
    border: 2px solid var(--input-border-color);
    box-shadow: 0 0 10px 0 var(--input-box-shadow-color);
    border: 2px solid var(--prompt-border-color);
    box-shadow: 0 0 10px 0 var(--prompt-box-shadow-color);
  }

  &:disabled {
@@ -1,32 +1,15 @@
.invokeai__number-input-form-control {
.number-input {
  display: grid;
  grid-template-columns: max-content auto;
  column-gap: 1rem;
  align-items: center;

  .invokeai__number-input-form-label {
  .number-input-label {
    color: var(--text-color-secondary);
    margin-right: 0;
    font-size: 1rem;
    margin-bottom: 0;
    flex-grow: 2;
    white-space: nowrap;

    &[data-focus] + .invokeai__number-input-root {
      outline: none;
      border: 2px solid var(--input-border-color);
      box-shadow: 0 0 10px 0 var(--input-box-shadow-color);
    }

    &[aria-invalid='true'] + .invokeai__number-input-root {
      outline: none;
      border: 2px solid var(--border-color-invalid);
      box-shadow: 0 0 10px 0 var(--box-shadow-color-invalid);
    }
  }

  .invokeai__number-input-root {
    height: 2rem;
  .number-input-field {
    display: grid;
    grid-template-columns: auto max-content;
    column-gap: 0.5rem;
@@ -36,39 +19,34 @@
    border-radius: 0.2rem;
  }

  .invokeai__number-input-field {
  .number-input-entry {
    border: none;
    font-weight: bold;
    width: 100%;
    height: auto;
    padding: 0;
    font-size: 0.9rem;
    padding-left: 0.5rem;
    padding-right: 0.5rem;
    padding-inline-end: 0;

    &:focus {
      outline: none;
      box-shadow: none;
      border: 2px solid var(--prompt-border-color);
      box-shadow: 0 0 10px 0 var(--prompt-box-shadow-color);
    }

    &:disabled {
      opacity: 0.2;
    }
  }
  .invokeai__number-input-stepper {

  .number-input-stepper {
    display: grid;
    padding-right: 0.5rem;
    padding-right: 0.7rem;

    .invokeai__number-input-stepper-button {
      svg {
        width: 12px;
        height: 12px;
      }

    .number-input-stepper-button {
      border: none;
      // expand arrow hitbox
      padding: 0 0.5rem;
      margin: 0 -0.5rem;

      svg {
        width: 10px;
        height: 10px;
      }
    }
  }
}
@@ -6,12 +6,6 @@ import {
  NumberDecrementStepper,
  NumberInputProps,
  FormLabel,
  NumberInputFieldProps,
  NumberInputStepperProps,
  FormControlProps,
  FormLabelProps,
  TooltipProps,
  Tooltip,
} from '@chakra-ui/react';
import _ from 'lodash';
import { FocusEvent, useEffect, useState } from 'react';
@@ -29,12 +23,6 @@ interface Props extends Omit<NumberInputProps, 'onChange'> {
  max: number;
  clamp?: boolean;
  isInteger?: boolean;
  formControlProps?: FormControlProps;
  formLabelProps?: FormLabelProps;
  numberInputProps?: NumberInputProps;
  numberInputFieldProps?: NumberInputFieldProps;
  numberInputStepperProps?: NumberInputStepperProps;
  tooltipProps?: Omit<TooltipProps, 'children'>;
}

/**
@@ -46,6 +34,8 @@ const IAINumberInput = (props: Props) => {
    styleClass,
    isDisabled = false,
    showStepper = true,
    fontSize = '1rem',
    size = 'sm',
    width,
    textAlign,
    isInvalid,
@@ -54,11 +44,6 @@ const IAINumberInput = (props: Props) => {
    min,
    max,
    isInteger = true,
    formControlProps,
    formLabelProps,
    numberInputFieldProps,
    numberInputStepperProps,
    tooltipProps,
    ...rest
  } = props;

@@ -80,10 +65,7 @@ const IAINumberInput = (props: Props) => {
   * from the current value.
   */
  useEffect(() => {
    if (
      !valueAsString.match(numberStringRegex) &&
      value !== Number(valueAsString)
    ) {
    if (!valueAsString.match(numberStringRegex) && value !== Number(valueAsString)) {
      setValueAsString(String(value));
    }
  }, [value, valueAsString]);
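The useEffect above keeps a string shadow of the numeric value so in-progress entries (a lone "-" or a trailing ".") are not clobbered while the user types. A standalone sketch of the same idea; this `numberStringRegex` is an illustrative stand-in for the component's actual pattern, and `useNumberString` is not part of the codebase:

import { useEffect, useState } from 'react';

// Matches incomplete numeric entries such as "", "-", ".", or "-0."
const numberStringRegex = /^-?(0\.)?\.?$/;

function useNumberString(value: number) {
  const [valueAsString, setValueAsString] = useState(String(value));

  useEffect(() => {
    // Resync only when the string is not an in-progress entry and it
    // disagrees with the authoritative numeric value.
    if (!valueAsString.match(numberStringRegex) && value !== Number(valueAsString)) {
      setValueAsString(String(value));
    }
  }, [value, valueAsString]);

  return [valueAsString, setValueAsString] as const;
}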
@@ -112,55 +94,47 @@ const IAINumberInput = (props: Props) => {
  };

  return (
    <Tooltip {...tooltipProps}>
      <FormControl
        isDisabled={isDisabled}
        isInvalid={isInvalid}
        className={
          styleClass
            ? `invokeai__number-input-form-control ${styleClass}`
            : `invokeai__number-input-form-control`
        }
        {...formControlProps}
      >
    <FormControl
      isDisabled={isDisabled}
      isInvalid={isInvalid}
      className={`number-input ${styleClass}`}
    >
      {label && (
        <FormLabel
          className="invokeai__number-input-form-label"
          style={{ display: label ? 'block' : 'none' }}
          {...formLabelProps}
          fontSize={fontSize}
          marginBottom={1}
          flexGrow={2}
          whiteSpace="nowrap"
          className="number-input-label"
        >
          {label}
        </FormLabel>
        <NumberInput
          className="invokeai__number-input-root"
          value={valueAsString}
          keepWithinRange={true}
          clampValueOnBlur={false}
          onChange={handleOnChange}
          onBlur={handleBlur}
      )}
      <NumberInput
        size={size}
        {...rest}
        className="number-input-field"
        value={valueAsString}
        keepWithinRange={true}
        clampValueOnBlur={false}
        onChange={handleOnChange}
        onBlur={handleBlur}
      >
        <NumberInputField
          fontSize={fontSize}
          className="number-input-entry"
          width={width}
          {...rest}
          textAlign={textAlign}
        />
        <div
          className="number-input-stepper"
          style={showStepper ? { display: 'block' } : { display: 'none' }}
        >
          <NumberInputField
            className="invokeai__number-input-field"
            textAlign={textAlign}
            {...numberInputFieldProps}
          />
          <div
            className="invokeai__number-input-stepper"
            style={showStepper ? { display: 'block' } : { display: 'none' }}
          >
            <NumberIncrementStepper
              {...numberInputStepperProps}
              className="invokeai__number-input-stepper-button"
            />
            <NumberDecrementStepper
              {...numberInputStepperProps}
              className="invokeai__number-input-stepper-button"
            />
          </div>
        </NumberInput>
      </FormControl>
    </Tooltip>
          <NumberIncrementStepper className="number-input-stepper-button" />
          <NumberDecrementStepper className="number-input-stepper-button" />
        </div>
      </NumberInput>
    </FormControl>
  );
};
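For reference, a hypothetical usage of IAINumberInput; per the Omit<NumberInputProps, 'onChange'> interface above, the component replaces Chakra's onChange with its own (assumed here to deliver the parsed number). The label, bounds, and handler are illustrative:

import { useState } from 'react';
import IAINumberInput from './IAINumberInput'; // import path is illustrative

const StepsSetting = () => {
  const [steps, setSteps] = useState(50);
  return (
    <IAINumberInput
      label="Steps"
      min={1}
      max={9999}
      step={1}
      value={steps}
      onChange={(v: number) => setSteps(v)}
      isInteger={true}
    />
  );
};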
@@ -1,12 +0,0 @@
.invokeai__popover-content {
  min-width: unset;
  width: unset !important;
  padding: 1rem;
  border-radius: 0.5rem !important;
  background-color: var(--background-color) !important;
  border: 2px solid var(--border-color) !important;

  .invokeai__popover-arrow {
    background-color: var(--background-color) !important;
  }
}