Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 14:58:00 -05:00)

Compare commits: main...ryan/gguf-

15 commits:

- bc300bc498
- c95a8d2a3c
- b31ba61d23
- 579eb8718e
- b4c5210902
- 1f58b26e74
- 2eba5457da
- b1ac6f986e
- 7213fceaa6
- faae144b35
- 17bf03ab7f
- cc24a0e39f
- 1a40b486e4
- 12f5247caa
- d43dd97016

@@ -1,11 +1,9 @@
*
!invokeai
!pyproject.toml
!uv.lock
!docker/docker-entrypoint.sh
!LICENSE

**/dist
**/node_modules
**/__pycache__
**/*.egg-info
**/*.egg-info

@@ -1,5 +1,2 @@
b3dccfaeb636599c02effc377cdd8a87d658256c
218b6d0546b990fc449c876fb99f44b50c4daa35
182580ff6970caed400be178c5b888514b75d7f2
8e9d5c1187b0d36da80571ce4c8ba9b3a37b6c46
99aac5870e1092b182e6c5f21abcaab6936a4ad1

.gitattributes

@@ -2,5 +2,4 @@
# Only affects text files and ignores other file types.
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
* text=auto
docker/** text eol=lf
tests/test_model_probe/stripped_models/** filter=lfs diff=lfs merge=lfs -text
docker/** text eol=lf

.github/CODEOWNERS

@@ -1,31 +1,32 @@
# continuous integration
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr

# documentation
/docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @psychedelicious
/docs/ @lstein @blessedcoolant @hipsterusername @Millu
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu

# nodes
/invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku
/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername

# installation and configuration
/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
/scripts/ @ebr @lstein @psychedelicious @hipsterusername
/installer/ @lstein @ebr @psychedelicious @hipsterusername
/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
/invokeai/configs @lstein @psychedelicious @hipsterusername
/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername
/pyproject.toml @lstein @blessedcoolant @hipsterusername
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
/scripts/ @ebr @lstein @hipsterusername
/installer/ @lstein @ebr @hipsterusername
/invokeai/assets @lstein @ebr @hipsterusername
/invokeai/configs @lstein @hipsterusername
/invokeai/version @lstein @blessedcoolant @hipsterusername

# web ui
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername

# generation, model management, postprocessing
/invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp
/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername

# front ends
/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/CLI @lstein @hipsterusername
/invokeai/frontend/install @lstein @ebr @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername

.github/ISSUE_TEMPLATE/BUG_REPORT.yml

@@ -21,20 +21,6 @@ body:
- label: I have searched the existing issues
required: true

- type: dropdown
id: install_method
attributes:
label: Install method
description: How did you install Invoke?
multiple: false
options:
- "Invoke's Launcher"
- 'Stability Matrix'
- 'Pinokio'
- 'Manual'
validations:
required: true

- type: markdown
attributes:
value: __Describe your environment__

@@ -90,8 +76,8 @@ body:
attributes:
label: Version number
description: |
The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: ex. v6.0.2
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: ex. 3.6.1
validations:
required: true

@@ -99,17 +85,17 @@ body:
id: browser-version
attributes:
label: Browser
description: Your web browser and version, if you do not use the Launcher's provided GUI.
description: Your web browser and version.
placeholder: ex. Firefox 123.0b3
validations:
required: false
required: true

- type: textarea
id: python-deps
attributes:
label: System Information
label: Python dependencies
description: |
Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
validations:
required: false

@@ -3,15 +3,15 @@ description: Installs frontend dependencies with pnpm, with caching
runs:
using: 'composite'
steps:
- name: setup node 20
- name: setup node 18
uses: actions/setup-node@v4
with:
node-version: '20'
node-version: '18'

- name: setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10
version: 8.15.6
run_install: false

- name: get pnpm store directory

.github/pull_request_template.md

@@ -18,6 +18,4 @@
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _❗Changes to a redux slice have a corresponding migration_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_

.github/workflows/build-container.yml

@@ -45,9 +45,6 @@ jobs:
steps:
- name: Free up more disk space on the runner
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
# the /mnt dir has 70GBs of free space
# /dev/sda1 74G 28K 70G 1% /mnt
# According to some online posts the /mnt is not always there, so checking before setting docker to use it
run: |
echo "----- Free space before cleanup"
df -h

@@ -55,11 +52,6 @@ jobs:
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo swapoff /mnt/swapfile
sudo rm -rf /mnt/swapfile
if [ -d /mnt ]; then
sudo chmod -R 777 /mnt
echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
fi
echo "----- Free space after cleanup"
df -h

@@ -84,6 +76,9 @@ jobs:
latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.gpu-driver }},onlatest=false

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:

@@ -105,12 +100,10 @@ jobs:
context: .
file: docker/Dockerfile
platforms: ${{ env.PLATFORMS }}
build-args: |
GPU_DRIVER=${{ matrix.gpu-driver }}
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
# cache-from: |
# type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
# type=gha,scope=main-${{ matrix.gpu-driver }}
# cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
type=gha,scope=main-${{ matrix.gpu-driver }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}

@@ -1,6 +1,6 @@
# Builds and uploads python build artifacts.
# Builds and uploads the installer and python build artifacts.

name: build wheel
name: build installer

on:
workflow_dispatch:

@@ -17,7 +17,7 @@ jobs:
- name: setup python
uses: actions/setup-python@v5
with:
python-version: '3.12'
python-version: '3.10'
cache: pip
cache-dependency-path: pyproject.toml

@@ -27,12 +27,19 @@ jobs:
- name: setup frontend
uses: ./.github/actions/install-frontend-deps

- name: build wheel
id: build_wheel
run: ./scripts/build_wheel.sh
- name: create installer
id: create_installer
run: ./create_installer.sh
working-directory: installer

- name: upload python distribution artifact
uses: actions/upload-artifact@v4
with:
name: dist
path: ${{ steps.build_wheel.outputs.DIST_PATH }}
path: ${{ steps.create_installer.outputs.DIST_PATH }}

- name: upload installer artifact
uses: actions/upload-artifact@v4
with:
name: installer
path: ${{ steps.create_installer.outputs.INSTALLER_PATH }}

.github/workflows/frontend-checks.yml

@@ -44,12 +44,7 @@ jobs:
- name: check for changed frontend files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
uses: tj-actions/changed-files@v42
with:
files_yaml: |
frontend:

.github/workflows/frontend-tests.yml

@@ -44,12 +44,7 @@ jobs:
- name: check for changed frontend files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
uses: tj-actions/changed-files@v42
with:
files_yaml: |
frontend:

.github/workflows/lfs-checks.yml

@@ -1,30 +0,0 @@
# Checks that large files and LFS-tracked files are properly checked in with pointer format.
# Uses https://github.com/ppremk/lfs-warning to detect LFS issues.

name: 'lfs checks'

on:
push:
branches:
- 'main'
pull_request:
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:

jobs:
lfs-check:
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
# Required to label and comment on the PRs
pull-requests: write
steps:
- name: checkout
uses: actions/checkout@v4

- name: check lfs files
uses: ppremk/lfs-warning@v3.3

.github/workflows/python-checks.yml

@@ -34,9 +34,6 @@ on:

jobs:
python-checks:
env:
# uv requires a venv by default - but for this, we can simply use the system python
UV_SYSTEM_PYTHON: 1
runs-on: ubuntu-latest
timeout-minutes: 5 # expected run time: <1 min
steps:

@@ -46,12 +43,7 @@ jobs:
- name: check for changed python files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
uses: tj-actions/changed-files@v42
with:
files_yaml: |
python:

@@ -60,23 +52,25 @@ jobs:
- '!invokeai/frontend/web/**'
- 'tests/**'

- name: setup uv
- name: setup python
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
uses: astral-sh/setup-uv@v5
uses: actions/setup-python@v5
with:
version: '0.6.10'
enable-cache: true
python-version: '3.10'
cache: pip
cache-dependency-path: pyproject.toml

- name: check pypi classifiers
- name: install ruff
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
run: pip install ruff==0.6.0
shell: bash

- name: ruff check
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv tool run ruff@0.11.2 check --output-format=github .
run: ruff check --output-format=github .
shell: bash

- name: ruff format
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv tool run ruff@0.11.2 format --check .
run: ruff format --check .
shell: bash
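
The pinned ruff invocations shown in this workflow can be reproduced locally before pushing; a minimal sketch using the same version the CI job pins:

```sh
# lint and check formatting with the same pinned ruff version as CI
uv tool run ruff@0.11.2 check --output-format=github .
uv tool run ruff@0.11.2 format --check .
```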

.github/workflows/python-tests.yml

@@ -39,15 +39,24 @@ jobs:
strategy:
matrix:
python-version:
- '3.10'
- '3.11'
- '3.12'
platform:
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
include:
- platform: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- platform: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- platform: linux-cpu
os: ubuntu-24.04
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- platform: macos-default

@@ -61,22 +70,14 @@ jobs:
timeout-minutes: 15 # expected run time: 2-6 min, depending on platform
env:
PIP_USE_PEP517: '1'
UV_SYSTEM_PYTHON: 1

steps:
- name: checkout
# https://github.com/nschloe/action-cached-lfs-checkout
uses: nschloe/action-cached-lfs-checkout@f46300cd8952454b9f0a21a3d133d4bd5684cfc2
uses: actions/checkout@v4

- name: check for changed python files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
uses: tj-actions/changed-files@v42
with:
files_yaml: |
python:

@@ -85,25 +86,20 @@ jobs:
- '!invokeai/frontend/web/**'
- 'tests/**'

- name: setup uv
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
uses: astral-sh/setup-uv@v5
with:
version: '0.6.10'
enable-cache: true
python-version: ${{ matrix.python-version }}

- name: setup python
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: pip
cache-dependency-path: pyproject.toml

- name: install dependencies
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
env:
UV_INDEX: ${{ matrix.extra-index-url }}
run: uv pip install --editable ".[test]"
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
run: >
pip3 install --editable=".[test]"

- name: run pytest
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}

.github/workflows/release.yml

@@ -49,7 +49,7 @@ jobs:
always_run: true

build:
uses: ./.github/workflows/build-wheel.yml
uses: ./.github/workflows/build-installer.yml

publish-testpypi:
runs-on: ubuntu-latest

.github/workflows/typegen-checks.yml

@@ -1,110 +0,0 @@
# Runs typegen schema quality checks.
# Frontend types should match the server.
#
# Checks for changes to files before running the checks.
# If always_run is true, always runs the checks.

name: 'typegen checks'

on:
push:
branches:
- 'main'
pull_request:
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:
inputs:
always_run:
description: 'Always run the checks'
required: true
type: boolean
default: true
workflow_call:
inputs:
always_run:
description: 'Always run the checks'
required: true
type: boolean
default: true

jobs:
typegen-checks:
runs-on: ubuntu-22.04
timeout-minutes: 15 # expected run time: <5 min
steps:
- name: checkout
uses: actions/checkout@v4

- name: Free up more disk space on the runner
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
run: |
echo "----- Free space before cleanup"
df -h
sudo rm -rf /usr/share/dotnet
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo swapoff /mnt/swapfile
sudo rm -rf /mnt/swapfile
echo "----- Free space after cleanup"
df -h

- name: check for changed files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
src:
- 'pyproject.toml'
- 'invokeai/**'

- name: setup uv
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
uses: astral-sh/setup-uv@v5
with:
version: '0.6.10'
enable-cache: true
python-version: '3.11'

- name: setup python
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: install dependencies
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
env:
UV_INDEX: ${{ matrix.extra-index-url }}
run: uv pip install --editable .

- name: install frontend dependencies
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
uses: ./.github/actions/install-frontend-deps

- name: copy schema
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
run: cp invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts
shell: bash

- name: generate schema
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
run: cd invokeai/frontend/web && uv run ../../../scripts/generate_openapi_schema.py | pnpm typegen
shell: bash

- name: compare files
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
run: |
if ! diff invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts; then
echo "Files are different!";
exit 1;
fi
shell: bash
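
The same typegen check can be reproduced locally; a sketch using the command the workflow runs (the Makefile exposes an equivalent `frontend-typegen` target):

```sh
# regenerate the frontend types from the OpenAPI schema; CI fails if schema.ts changes afterwards
cd invokeai/frontend/web && uv run ../../../scripts/generate_openapi_schema.py | pnpm typegen
```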

.github/workflows/uv-lock-checks.yml

@@ -1,68 +0,0 @@
# Check the `uv` lockfile for consistency with `pyproject.toml`.
#
# If this check fails, you should run `uv lock` to update the lockfile.

name: 'uv lock checks'

on:
push:
branches:
- 'main'
pull_request:
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:
inputs:
always_run:
description: 'Always run the checks'
required: true
type: boolean
default: true
workflow_call:
inputs:
always_run:
description: 'Always run the checks'
required: true
type: boolean
default: true

jobs:
uv-lock-checks:
env:
# uv requires a venv by default - but for this, we can simply use the system python
UV_SYSTEM_PYTHON: 1
runs-on: ubuntu-latest
timeout-minutes: 5 # expected run time: <1 min
steps:
- name: checkout
uses: actions/checkout@v4

- name: check for changed python files
if: ${{ inputs.always_run != true }}
id: changed-files
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
uvlock-pyprojecttoml:
- 'pyproject.toml'
- 'uv.lock'

- name: setup uv
if: ${{ steps.changed-files.outputs.uvlock-pyprojecttoml_any_changed == 'true' || inputs.always_run == true }}
uses: astral-sh/setup-uv@v5
with:
version: '0.6.10'
enable-cache: true

- name: check lockfile
if: ${{ steps.changed-files.outputs.uvlock-pyprojecttoml_any_changed == 'true' || inputs.always_run == true }}
run: uv lock --locked # this will exit with 1 if the lockfile is not consistent with pyproject.toml
shell: bash
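
As the workflow's comments suggest, the same check and fix can be run locally; a minimal sketch:

```sh
# exits non-zero if uv.lock is not consistent with pyproject.toml (same as the CI job)
uv lock --locked
# if it fails, regenerate the lockfile and commit the result
uv lock
```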

.gitignore

@@ -180,7 +180,6 @@ cython_debug/
# Scratch folder
.scratch/
.vscode/
.zed/

# source installer files
installer/*zip

@@ -189,6 +188,3 @@ installer/install.sh
installer/update.bat
installer/update.sh
installer/InvokeAI-Installer/
.aider*

.claude/

@@ -4,29 +4,21 @@ repos:
hooks:
- id: black
name: black
stages: [pre-commit]
stages: [commit]
language: system
entry: black
types: [python]

- id: flake8
name: flake8
stages: [pre-commit]
stages: [commit]
language: system
entry: flake8
types: [python]

- id: isort
name: isort
stages: [pre-commit]
stages: [commit]
language: system
entry: isort
types: [python]

- id: uvlock
name: uv lock
stages: [pre-commit]
language: system
entry: uv lock
files: ^pyproject\.toml$
pass_filenames: false
types: [python]

Makefile

@@ -16,7 +16,7 @@ help:
@echo "frontend-build Build the frontend in order to run on localhost:9090"
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
@echo "frontend-typegen Generate types for the frontend from the OpenAPI schema"
@echo "wheel Build the wheel for the current version"
@echo "installer-zip Build the installer .zip file for the current version"
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
@echo "openapi Generate the OpenAPI schema for the app, outputting to stdout"
@echo "docs Serve the mkdocs site with live reload"

@@ -64,13 +64,13 @@ frontend-dev:
frontend-typegen:
cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen

# Tag the release
wheel:
cd scripts && ./build_wheel.sh
# Installer zip file
installer-zip:
cd installer && ./create_installer.sh

# Tag the release
tag-release:
cd scripts && ./tag_release.sh
cd installer && ./tag_release.sh

# Generate the OpenAPI Schema for the app
openapi:

README.md

@@ -30,12 +30,51 @@ Invoke is available in two editions:
|----------------------------------------------------------------------------------------------------------------------------|
| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |

# Installation
</div>

To get started with Invoke, [Download the Installer](https://www.invoke.com/downloads).
## Quick Start

For detailed step by step instructions, or for instructions on manual/docker installations, visit our documentation on [Installation and Updates][installation docs]
1. Download and unzip the installer from the bottom of the [latest release][latest release link].
2. Run the installer script.

- **Windows**: Double-click on the `install.bat` script.
- **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
- **Linux**: Run `install.sh`.

3. When prompted, enter a location for the install and select your GPU type.
4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
7. Open the model manager tab to install a starter model and then you'll be ready to generate.

More detail, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs].

## Docker Container

We publish official container images in Github Container Registry: https://github.com/invoke-ai/InvokeAI/pkgs/container/invokeai. Both CUDA and ROCm images are available. Check the above link for relevant tags.

> [!IMPORTANT]
> Ensure that Docker is set up to use the GPU. Refer to [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] documentation.

### Generate!

Run the container, modifying the command as necessary:

```bash
docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
```

Then open `http://localhost:9090` and install some models using the Model Manager tab to begin generating.

For ROCm, add `--device /dev/kfd --device /dev/dri` to the `docker run` command.

### Persist your data

You will likely want to persist your workspace outside of the container. Use the `--volume /home/myuser/invokeai:/invokeai` flag to mount some local directory (using its **absolute** path) to the `/invokeai` path inside the container. Your generated images and models will reside there. You can use this directory with other InvokeAI installations, or switch between runtime directories as needed.
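
For example, combining the flags above into a single command (a sketch; substitute your own absolute host path):

```bash
docker run --runtime=nvidia --gpus=all \
  --publish 9090:9090 \
  --volume /home/myuser/invokeai:/invokeai \
  ghcr.io/invoke-ai/invokeai
```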

### DIY

Build your own image and customize the environment to match your needs using our `docker-compose` stack. See [README.md](./docker/README.md) in the [docker](./docker) directory.

## Troubleshooting, FAQ and Support

@@ -66,7 +105,7 @@ Invoke features an organized gallery system for easily storing, accessing, and r
### Other features

- Support for both ckpt and diffusers models
- SD1.5, SD2.0, SDXL, and FLUX support
- SD1.5, SD2.0, and SDXL support
- Upscaling Tools
- Embedding Manager & Support
- Model Manager & Support

SECURITY.md

@@ -1,14 +0,0 @@
# Security Policy

## Supported Versions

Only the latest version of Invoke will receive security updates.
We do not currently maintain multiple versions of the application with updates.

## Reporting a Vulnerability

To report a vulnerability, contact the Invoke team directly at security@invoke.ai

At this time, we do not maintain a formal bug bounty program.

You can also share identified security issues with our team on huntr.com

@@ -22,10 +22,6 @@
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
# GPU_DRIVER=cuda #| rocm

## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
# RENDER_GROUP_ID=

## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
# CONTAINER_UID=1000
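
Putting the commented variables above together, a hypothetical `docker/.env` for a ROCm host might look like this (the group ID value is illustrative; use the number reported by `getent group render`):

```bash
GPU_DRIVER=rocm
RENDER_GROUP_ID=110   # from `getent group render` on the host
CONTAINER_UID=1000    # from `id -u` on the host
```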

@@ -1,11 +1,62 @@
# syntax=docker/dockerfile:1.4

#### Web UI ------------------------------------
## Builder stage

FROM docker.io/node:22-slim AS web-builder
FROM library/ubuntu:23.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
python3-venv \
python3-pip \
build-essential

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai

ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM

WORKDIR ${INVOKEAI_SRC}

COPY invokeai ./invokeai
COPY pyproject.toml ./

# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is default
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
fi &&\

# xformers + triton fails to install on arm64
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
pip install $extra_index_url_arg -e ".[xformers]"; \
else \
pip install $extra_index_url_arg -e "."; \
fi

# #### Build the Web UI ------------------------------------

FROM node:20-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@10.x && corepack enable
RUN corepack use pnpm@8.x
RUN corepack enable

WORKDIR /build
COPY invokeai/frontend/web/ ./

@@ -13,95 +64,61 @@ RUN --mount=type=cache,target=/pnpm/store \
pnpm install --frozen-lockfile
RUN npx vite build

## Backend ---------------------------------------
#### Runtime stage ---------------------------------------

FROM library/ubuntu:24.04
FROM library/ubuntu:23.04 AS runtime

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt \
--mount=type=cache,target=/var/lib/apt \
apt update && apt install -y --no-install-recommends \
ca-certificates \
git \
gosu \
libglib2.0-0 \
libgl1 \
libglx-mesa0 \
build-essential \
libopencv-dev \
libstdc++-10-dev
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

ENV \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
VIRTUAL_ENV=/opt/venv \
INVOKEAI_SRC=/opt/invokeai \
PYTHON_VERSION=3.12 \
UV_PYTHON=3.12 \
UV_COMPILE_BYTECODE=1 \
UV_MANAGED_PYTHON=1 \
UV_LINK_MODE=copy \
UV_PROJECT_ENVIRONMENT=/opt/venv \
INVOKEAI_ROOT=/invokeai \
INVOKEAI_HOST=0.0.0.0 \
INVOKEAI_PORT=9090 \
PATH="/opt/venv/bin:$PATH" \
CONTAINER_UID=${CONTAINER_UID:-1000} \
CONTAINER_GID=${CONTAINER_GID:-1000}
RUN apt update && apt install -y --no-install-recommends \
git \
curl \
vim \
tmux \
ncdu \
iotop \
bzip2 \
gosu \
magic-wormhole \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
libopencv-dev \
libstdc++-10-dev &&\
apt-get clean && apt-get autoclean

ARG GPU_DRIVER=cuda

# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0
ENV INVOKEAI_PORT=9090
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
ENV CONTAINER_GID=${CONTAINER_GID:-1000}

# Install python & allow non-root user to use it by traversing the /root dir without read permissions
RUN --mount=type=cache,target=/root/.cache/uv \
uv python install ${PYTHON_VERSION} && \
# chmod --recursive a+rX /root/.local/share/uv/python
chmod 711 /root

WORKDIR ${INVOKEAI_SRC}

# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
--mount=type=bind,source=invokeai/version,target=invokeai/version \
ulimit -n 30000 && \
uv sync --extra $GPU_DRIVER --frozen
# --link requires buldkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" && groupadd render
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"

WORKDIR ${INVOKEAI_SRC}

# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"
RUN python3 -c "from patchmatch import patch_match"

RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web"]

# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# add sources last to minimize image changes on code changes
COPY invokeai ${INVOKEAI_SRC}/invokeai

# this should not increase image size because we've already installed dependencies
# in a previous layer
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
ulimit -n 30000 && \
uv pip install -e .[$GPU_DRIVER]

@@ -1,136 +0,0 @@
# syntax=docker/dockerfile:1.4

#### Web UI ------------------------------------

FROM docker.io/node:22-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
RUN corepack enable

WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/pnpm/store \
pnpm install --frozen-lockfile
RUN npx vite build

## Backend ---------------------------------------

FROM library/ubuntu:24.04

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt \
--mount=type=cache,target=/var/lib/apt \
apt update && apt install -y --no-install-recommends \
ca-certificates \
git \
gosu \
libglib2.0-0 \
libgl1 \
libglx-mesa0 \
build-essential \
libopencv-dev \
libstdc++-10-dev \
wget

ENV \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
VIRTUAL_ENV=/opt/venv \
INVOKEAI_SRC=/opt/invokeai \
PYTHON_VERSION=3.12 \
UV_PYTHON=3.12 \
UV_COMPILE_BYTECODE=1 \
UV_MANAGED_PYTHON=1 \
UV_LINK_MODE=copy \
UV_PROJECT_ENVIRONMENT=/opt/venv \
INVOKEAI_ROOT=/invokeai \
INVOKEAI_HOST=0.0.0.0 \
INVOKEAI_PORT=9090 \
PATH="/opt/venv/bin:$PATH" \
CONTAINER_UID=${CONTAINER_UID:-1000} \
CONTAINER_GID=${CONTAINER_GID:-1000}

ARG GPU_DRIVER=cuda

# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/

# Install python & allow non-root user to use it by traversing the /root dir without read permissions
RUN --mount=type=cache,target=/root/.cache/uv \
uv python install ${PYTHON_VERSION} && \
# chmod --recursive a+rX /root/.local/share/uv/python
chmod 711 /root

WORKDIR ${INVOKEAI_SRC}

# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
--mount=type=bind,source=invokeai/version,target=invokeai/version \
ulimit -n 30000 && \
uv sync --extra $GPU_DRIVER --frozen

RUN --mount=type=cache,target=/var/cache/apt \
--mount=type=cache,target=/var/lib/apt \
if [ "$GPU_DRIVER" = "rocm" ]; then \
wget -O /tmp/amdgpu-install.deb \
https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
apt install -y /tmp/amdgpu-install.deb && \
apt update && \
amdgpu-install --usecase=rocm -y && \
apt-get autoclean && \
apt clean && \
rm -rf /tmp/* /var/tmp/* && \
usermod -a -G render ubuntu && \
usermod -a -G video ubuntu && \
echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
ldconfig && \
update-alternatives --auto rocm; \
fi

## Heathen711: Leaving this for review input, will remove before merge
# RUN --mount=type=cache,target=/var/cache/apt \
# --mount=type=cache,target=/var/lib/apt \
# if [ "$GPU_DRIVER" = "rocm" ]; then \
# groupadd render && \
# usermod -a -G render ubuntu && \
# usermod -a -G video ubuntu; \
# fi

## Link amdgpu.ids for ROCm builds
## contributed by https://github.com/Rubonnek
# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
# ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"

# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"

RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web"]

# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# add sources last to minimize image changes on code changes
COPY invokeai ${INVOKEAI_SRC}/invokeai

# this should not increase image size because we've already installed dependencies
# in a previous layer
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
ulimit -n 30000 && \
uv pip install -e .[$GPU_DRIVER]

@@ -1,7 +1,7 @@
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr

x-invokeai: &invokeai
image: "ghcr.io/invoke-ai/invokeai:latest"
image: "local/invokeai:latest"
build:
context: ..
dockerfile: docker/Dockerfile

@@ -47,9 +47,8 @@ services:

invokeai-rocm:
<<: *invokeai
environment:
- AMD_VISIBLE_DEVICES=all
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
runtime: amd
devices:
- /dev/kfd:/dev/kfd
- /dev/dri:/dev/dri
profiles:
- rocm
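
A usage sketch for the ROCm profile defined above, using the `RENDER_GROUP_ID` variable described in the environment file:

```bash
# from the docker/ directory: record the host render group, then start the ROCm service
echo "RENDER_GROUP_ID=$(getent group render | cut -d: -f3)" >> .env
docker compose --profile rocm up -d invokeai-rocm
```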

@@ -16,22 +16,8 @@ set -e -o pipefail

USER_ID=${CONTAINER_UID:-1000}
USER=ubuntu
# if the user does not exist, create it. It is expected to be present on ubuntu >=24.x
_=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
# ensure the UID is correct
usermod -u ${USER_ID} ${USER} 1>/dev/null

## ROCM specific configuration
# render group within the container must match the host render group
# otherwise the container will not be able to access the host GPU.
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
# ensure the render group exists
groupmod -g ${RENDER_GROUP_ID} render
usermod -a -G render ${USER}
usermod -a -G video ${USER}
fi

### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat.
# but it is useful to have the full SSH server e.g. on Runpod.

@@ -50,8 +36,6 @@ fi
mkdir -p "${INVOKEAI_ROOT}"
chown --recursive ${USER} "${INVOKEAI_ROOT}" || true
cd "${INVOKEAI_ROOT}"
export HF_HOME=${HF_HOME:-$INVOKEAI_ROOT/.cache/huggingface}
export MPLCONFIGDIR=${MPLCONFIGDIR:-$INVOKEAI_ROOT/.matplotlib}

# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"

@@ -13,7 +13,7 @@ run() {

# parse .env file for build args
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
profile="$(awk -F '=' '/GPU_DRIVER=/ {print $2}' .env)"
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"

# default to 'cuda' profile
[[ -z "$profile" ]] && profile="cuda"

@@ -30,7 +30,7 @@ run() {

printf "%s\n" "starting service $service_name"
docker compose --profile "$profile" up -d "$service_name"
docker compose --profile "$profile" logs -f
docker compose logs -f
}

run
139
docs/RELEASE.md
139
docs/RELEASE.md
@@ -1,50 +1,41 @@
|
||||
# Release Process
|
||||
|
||||
The Invoke application is published as a python package on [PyPI]. This includes both a source distribution and built distribution (a wheel).
|
||||
The app is published in twice, in different build formats.
|
||||
|
||||
Most users install it with the [Launcher](https://github.com/invoke-ai/launcher/), others with `pip`.
|
||||
|
||||
The launcher uses GitHub as the source of truth for available releases.
|
||||
|
||||
## Broad Strokes
|
||||
|
||||
- Merge all changes and bump the version in the codebase.
|
||||
- Tag the release commit.
|
||||
- Wait for the release workflow to complete.
|
||||
- Approve the PyPI publish jobs.
|
||||
- Write GH release notes.
|
||||
- A [PyPI] distribution. This includes both a source distribution and built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
|
||||
- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.
|
||||
|
||||
## General Prep
|
||||
|
||||
Make a developer call-out for PRs to merge. Merge and test things out. Bump the version by editing `invokeai/version/invokeai_version.py`.
|
||||
Make a developer call-out for PRs to merge. Merge and test things out.
|
||||
|
||||
While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.
|
||||
|
||||
## Release Workflow
|
||||
|
||||
The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.
|
||||
|
||||
It is triggered on **tag push**, when the tag matches `v*`.
|
||||
It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.
|
||||
|
||||
> Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch. So long as the tag exists, that commit will exist.
|
||||
|
||||
### Triggering the Workflow
|
||||
|
||||
Ensure all commits that should be in the release are merged, and you have pulled them locally.
|
||||
Run `make tag-release` to tag the current commit and kick off the workflow.
|
||||
|
||||
Double-check that you have checked out the commit that will represent the release (typically the latest commit on `main`).
|
||||
|
||||
Run `make tag-release` to tag the current commit and kick off the workflow. You will be prompted to provide a message - use the version specifier.
|
||||
|
||||
If this version's tag already exists for some reason (maybe you had to make a last minute change), the script will overwrite it.
|
||||
|
||||
> In case you cannot use the Make target, the release may also be dispatched [manually] via GH.
|
||||
The release may also be dispatched [manually].
|
||||
|
||||
### Workflow Jobs and Process
|
||||
|
||||
The workflow consists of a number of concurrently-run checks and tests, then two final publish jobs.
|
||||
The workflow consists of a number of concurrently-run jobs, and two final publish jobs.
|
||||
|
||||
The publish jobs require manual approval and are only run if the other jobs succeed.
|
||||
|
||||
#### `check-version` Job
|
||||
|
||||
This job ensures that the `invokeai` python package version specifier matches the tag for the release. The version specifier is pulled from the `__version__` variable in `invokeai/version/invokeai_version.py`.
|
||||
This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`.
|
||||
|
||||
When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown.
|
||||
|
||||
This job uses [samuelcolvin/check-python-version].
|
||||
|
||||
@@ -52,47 +43,62 @@ This job uses [samuelcolvin/check-python-version].
|
||||
|
||||
#### Check and Test Jobs
|
||||
|
||||
Next, these jobs run and must pass. They are the same jobs that are run for every PR.
|
||||
|
||||
- **`python-tests`**: runs `pytest` on matrix of platforms
|
||||
- **`python-checks`**: runs `ruff` (format and lint)
|
||||
- **`frontend-tests`**: runs `vitest`
|
||||
- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
|
||||
- **`typegen-checks`**: ensures the frontend and backend types are synced
|
||||
|
||||
#### `build-wheel` Job
|
||||
> **TODO** We should add `mypy` or `pyright` to the **`check-python`** job.
|
||||
|
||||
This sets up both python and frontend dependencies and builds the python package. Internally, this runs `./scripts/build_wheel.sh` and uploads `dist.zip`, which contains the wheel and unarchived build.
|
||||
> **TODO** We should add an end-to-end test job that generates an image.
|
||||
|
||||
You don't need to download or test these artifacts.
|
||||
#### `build-installer` Job
|
||||
|
||||
This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:
|
||||
|
||||
- **`dist`**: the python distribution, to be published on PyPI
|
||||
- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release
|
||||
|
||||
#### Sanity Check & Smoke Test
|
||||
|
||||
At this point, the release workflow pauses as the remaining publish jobs require approval.
|
||||
At this point, the release workflow pauses as the remaining publish jobs require approval. Time to test the installer.
|
||||
|
||||
It's possible to test the python package before it gets published to PyPI. We've never had problems with it, so it's not necessary to do this.
|
||||
Because the installer pulls from PyPI, and we haven't published to PyPI yet, you will need to install from the wheel:
|
||||
|
||||
But, if you want to be extra-super careful, here's how to test it:
|
||||
- Download and unzip `dist.zip` and the installer from the **Summary** tab of the workflow
|
||||
- Run the installer script using the `--wheel` CLI arg, pointing at the wheel:
|
||||
|
||||
- Download the `dist.zip` build artifact from the `build-wheel` job
|
||||
- Unzip it and find the wheel file
|
||||
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/) - but instead of installing from PyPI, install from the wheel
|
||||
- Test the app
|
||||
```sh
|
||||
./install.sh --wheel ../InvokeAI-4.0.0rc6-py3-none-any.whl
|
||||
```
|
||||
|
||||
- Install to a temporary directory so you get the new user experience
|
||||
- Download a model and generate
|
||||
|
||||
> The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with the exactly the same installation as if the installer got the wheel from PyPI.
|
||||
|
||||
##### Something isn't right
|
||||
|
||||
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?) and start over.
|
||||
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?).
|
||||
|
||||
Now you can start from the top:
|
||||
|
||||
- Fix the issues and PR the fixes per usual
|
||||
- Get the PR approved and merged per usual
|
||||
- Switch to `main` and pull in the fixes
|
||||
- Run `make tag-release` to move the tag to `HEAD` (which has the fixes) and kick off the release workflow again
|
||||
- Re-do the sanity check
|
||||
|
||||
#### PyPI Publish Jobs
|
||||
|
||||
The publish jobs will not run if any of the previous jobs fail.
|
||||
The publish jobs will run if any of the previous jobs fail.
|
||||
|
||||
They use [GitHub environments], which are configured as [trusted publishers] on PyPI.
|
||||
|
||||
Both jobs require a @hipsterusername or @psychedelicious to approve them from the workflow's **Summary** tab.
|
||||
Both jobs require a maintainer to approve them from the workflow's **Summary** tab.
|
||||
|
||||
- Click the **Review deployments** button
|
||||
- Select the environment (either `testpypi` or `pypi` - typically you select both)
|
||||
- Select the environment (either `testpypi` or `pypi`)
|
||||
- Click **Approve and deploy**
|
||||
|
||||
> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If the version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a follow-up release.
|
||||
@@ -107,33 +113,46 @@ If there are no incidents, contact @hipsterusername or @lstein, who have owner a
|
||||
|
||||
Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.
|
||||
|
||||
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release for some reason:
|
||||
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release.
|
||||
|
||||
- Approve this publish job without approving the prod publish
|
||||
- Let it finish
|
||||
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/), making sure to use the Test PyPI index URL: `https://test.pypi.org/simple/`
|
||||
- Test the app
|
||||
If approved and successful, you could try out the test release like this:
|
||||
|
||||
```sh
|
||||
# Create a new virtual environment
|
||||
python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
|
||||
# Install the distribution from Test PyPI
|
||||
pip install --index-url https://test.pypi.org/simple/ invokeai
|
||||
# Run and test the app
|
||||
invokeai-web
|
||||
# Cleanup
|
||||
deactivate
|
||||
rm -rf ~/.test-invokeai-dist
|
||||
```
|
||||
|
||||
#### `publish-pypi` Job
|
||||
|
||||
Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.
|
||||
|
||||
It's a good idea to wait to approve and run this job until you have the release notes ready!
|
||||
## Publish the GitHub Release with installer
|
||||
|
||||
## Prep and publish the GitHub Release
|
||||
Once the release is published to PyPI, it's time to publish the GitHub release.
|
||||
|
||||
1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
|
||||
2. The **Generate release notes** button automatically inserts the changelog and new contributors. Make sure to select the correct tags for this release and the last stable release. GH often selects the wrong tags - do this manually.
|
||||
3. Write the release notes, describing important changes. Contributions from community members should be shouted out. Use the GH-generated changelog to see all contributors. If there are Weblate translation updates, open that PR and shout out every person who contributed a translation.
|
||||
4. Check **Set as a pre-release** if it's a pre-release.
|
||||
5. Approve and wait for the `publish-pypi` job to finish if you haven't already.
|
||||
6. Publish the GH release.
|
||||
7. Post the release in Discord in the [releases](https://discord.com/channels/1020123559063990373/1149260708098359327) channel with abbreviated notes. For example:
|
||||
> Invoke v5.7.0 (stable): <https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0>
|
||||
>
|
||||
> It's a pretty big one - Form Builder, Metadata Nodes (thanks @SkunkWorxDark!), and much more.
|
||||
8. Right click the message in releases and copy the link to it. Then, post that link in the [new-release-discussion](https://discord.com/channels/1020123559063990373/1149506274971631688) channel. For example:
|
||||
> Invoke v5.7.0 (stable): <https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248>
|
||||
1. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
|
||||
1. Use `scripts/get_external_contributions.py` to get a list of external contributions to shout out in the release notes.
|
||||
1. Upload the zip file created in the **`build`** job to the Assets section of the release notes.
|
||||
1. Check **Set as a pre-release** if it's a pre-release.
|
||||
1. Check **Create a discussion for this release**.
|
||||
1. Publish the release.
|
||||
1. Announce the release in Discord.
|
||||
|
||||
> **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.
|
||||
|
||||
## Manual Build
|
||||
|
||||
The `build installer` workflow can be dispatched manually. This is useful to test the installer for a given branch or tag.
|
||||
|
||||
No checks are run - it just builds.
|
||||
|
||||
## Manual Release
|
||||
|
||||
@@ -141,10 +160,12 @@ The `release` workflow can be dispatched manually. You must dispatch the workflo
|
||||
|
||||
This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.
|
||||
|
||||
[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
[PyPI]: https://pypi.org/
|
||||
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
|
||||
[Test PyPI]: https://test.pypi.org/
|
||||
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
|
||||
[ncipollo/release-action]: https://github.com/ncipollo/release-action
|
||||
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
|
||||
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
|
||||
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
|
||||
|
||||
@@ -39,7 +39,7 @@ It has two sections - one for internal use and one for user settings:
|
||||
|
||||
```yaml
|
||||
# Internal metadata - do not edit:
|
||||
schema_version: 4.0.2
|
||||
schema_version: 4
|
||||
|
||||
# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:
|
||||
host: 0.0.0.0 # serve the app on your local network
|
||||
@@ -83,10 +83,6 @@ A subset of settings may be specified using CLI args:
|
||||
- `--root`: specify the root directory
|
||||
- `--config`: override the default `invokeai.yaml` file location
|
||||
|
||||
### Low-VRAM Mode
|
||||
|
||||
See the [Low-VRAM mode docs][low-vram] for details on enabling this feature.
|
||||
|
||||
### All Settings
|
||||
|
||||
Following the table are additional explanations for certain settings.
|
||||
@@ -118,10 +114,6 @@ remote_api_tokens:
|
||||
|
||||
The provided token will be added as a `Bearer` token to the network requests to download the model files. As far as we know, this works for all model marketplaces that require authorization.
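For reference, attaching the token is just a standard `Authorization` header on the download request. The snippet below is a generic `requests` sketch of that mechanism, not Invoke's actual download code:

```python
import requests

def download_with_token(url: str, token: str, dest: str) -> None:
    # The configured token is sent as a standard Bearer authorization header.
    response = requests.get(url, headers={"Authorization": f"Bearer {token}"}, stream=True, timeout=60)
    response.raise_for_status()
    with open(dest, "wb") as f:
        for chunk in response.iter_content(chunk_size=1024 * 1024):
            f.write(chunk)
```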
|
||||
|
||||
!!! tip "HuggingFace Models"
|
||||
|
||||
If you get an error when installing a HF model using a URL instead of repo id, you may need to [set up a HF API token](https://huggingface.co/settings/tokens) and add an entry for it under `remote_api_tokens`. Use `huggingface.co` for `url_regex`.
|
||||
|
||||
#### Model Hashing
|
||||
|
||||
Models are hashed during installation, providing a stable identifier for models across all platforms. Hashing is a one-time operation.
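As a rough illustration of the idea (not the exact algorithm or file ordering Invoke uses), a stable identifier can be derived by hashing a model's file contents:

```python
import hashlib
from pathlib import Path

def hash_model(model_path: Path) -> str:
    """Sketch: hash a single-file model, or every file in a model folder, in a stable order."""
    hasher = hashlib.sha256()  # sha256 as a stand-in for whichever algorithm Invoke actually uses
    files = [model_path] if model_path.is_file() else sorted(p for p in model_path.rglob("*") if p.is_file())
    for file in files:
        with open(file, "rb") as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b""):
                hasher.update(chunk)
    return hasher.hexdigest()
```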
|
||||
@@ -189,4 +181,3 @@ The `log_format` option provides several alternative formats:
|
||||
|
||||
[basic guide to yaml files]: https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/
|
||||
[Model Marketplace API Keys]: #model-marketplace-api-keys
|
||||
[low-vram]: ./features/low-vram.md
|
||||
|
||||
@@ -50,7 +50,7 @@ Applications are built on top of the invoke framework. They should construct `in
|
||||
|
||||
### Web UI
|
||||
|
||||
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/invokeai/frontend` and the backend code is found in `/invokeai/app/api_app.py` and `/invokeai/app/api/`. The code is further organized as such:
|
||||
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
|
||||
|
||||
| Component | Description |
|
||||
| --- | --- |
|
||||
@@ -62,7 +62,7 @@ The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.t
|
||||
|
||||
### CLI
|
||||
|
||||
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/invokeai/frontend/cli`.
|
||||
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
|
||||
|
||||
## Invoke
|
||||
|
||||
@@ -70,7 +70,7 @@ The Invoke framework provides the interface to the underlying AI systems and is
|
||||
|
||||
### Invoker
|
||||
|
||||
The invoker (`/invokeai/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
|
||||
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
|
||||
- **invocation services**, which are used by invocations to interact with core functionality.
|
||||
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
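A highly simplified sketch of that split (illustrative only - method names here are placeholders, and the real `Invoker` class has a richer interface):

```python
class Invoker:
    """Illustrative sketch: the invoker holds the two service collections described above."""

    def __init__(self, invocation_services, invoker_services):
        self.services = invocation_services        # consumed by invocations (images, models, events, ...)
        self.invoker_services = invoker_services   # used by the invoker itself (sessions, invocation queue)

    def create_execution_session(self):
        """Create and track a new session."""
        ...

    def invoke(self, session, invocation):
        """Queue an invocation from the given session for execution."""
        ...
```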
|
||||
|
||||
@@ -82,12 +82,12 @@ The session graph does not support looping. This is left as an application probl
|
||||
|
||||
### Invocations
|
||||
|
||||
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/invokeai/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
|
||||
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
|
||||
|
||||
### Services
|
||||
|
||||
Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/invokeai/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
|
||||
Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
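For example, a hypothetical new service (names invented purely for illustration) would follow that pattern like this:

```python
from abc import ABC, abstractmethod


class TextFileStorageBase(ABC):
    """Hypothetical service interface: the abstract base class defines the contract."""

    @abstractmethod
    def save(self, name: str, text: str) -> None: ...

    @abstractmethod
    def load(self, name: str) -> str: ...


class LocalTextFileStorage(TextFileStorageBase):
    """Lightweight local implementation, provided in the same module as the default.

    A cloud-backed implementation could live alongside it and import its SDK only
    when it is actually instantiated, keeping unused dependencies out of startup.
    """

    def __init__(self, root: str) -> None:
        self.root = root

    def save(self, name: str, text: str) -> None:
        with open(f"{self.root}/{name}", "w", encoding="utf-8") as f:
            f.write(text)

    def load(self, name: str) -> str:
        with open(f"{self.root}/{name}", encoding="utf-8") as f:
            return f.read()
```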
|
||||
|
||||
## AI Core
|
||||
|
||||
The AI Core is represented by the rest of the code base (i.e. the code outside of `/invokeai/app/`).
|
||||
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
|
||||
|
||||
@@ -39,7 +39,7 @@ nodes imported in the `__init__.py` file are loaded. See the README in the nodes
|
||||
folder for more examples:
|
||||
|
||||
```py
|
||||
from .cool_node import ResizeInvocation
|
||||
from .cool_node import CoolInvocation
|
||||
```
|
||||
|
||||
## Creating A New Invocation
|
||||
@@ -69,10 +69,7 @@ The first set of things we need to do when creating a new Invocation are -
|
||||
So let us do that.
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
invocation,
|
||||
)
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -106,12 +103,8 @@ create your own custom field types later in this guide. For now, let's go ahead
|
||||
and use it.
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
invocation,
|
||||
)
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -135,12 +128,8 @@ image: ImageField = InputField(description="The input image")
|
||||
Great. Now let us create our other inputs for `width` and `height`
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
invocation,
|
||||
)
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -155,7 +144,7 @@ As you might have noticed, we added two new arguments to the `InputField`
|
||||
definition for `width` and `height`, called `gt` and `le`. They stand for
|
||||
_greater than_ and _less than or equal to_.
|
||||
|
||||
These impose constraints on those fields, and will raise an exception if the
|
||||
These impose contraints on those fields, and will raise an exception if the
|
||||
values do not meet the constraints. Field constraints are provided by
|
||||
**pydantic**, so anything you see in the **pydantic docs** will work.
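As a concrete sketch (following the running example in this guide, with illustrative limits), constrained inputs look like this:

```python
from invokeai.invocation_api import BaseInvocation, ImageField, InputField, invocation


@invocation("resize")
class ResizeInvocation(BaseInvocation):
    """Resizes an image"""

    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, gt=0, le=4096, description="Width of the new image")
    height: int = InputField(default=512, gt=0, le=4096, description="Height of the new image")
```

Passing `width=0` or `height=5000` would fail validation before the invocation runs.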
|
||||
|
||||
@@ -174,13 +163,8 @@ that are provided by it by InvokeAI.
|
||||
Let us create this function first.
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -207,14 +191,8 @@ all the necessary info related to image outputs. So let us use that.
|
||||
We will cover how to create your own output types later in this guide.
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.image import ImageOutput
|
||||
|
||||
@invocation('resize')
|
||||
@@ -239,15 +217,9 @@ Perfect. Now that we have our Invocation setup, let us do what we want to do.
|
||||
So let's do that.
|
||||
|
||||
```python
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
|
||||
from invokeai.app.invocations.image import ImageOutput
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.image import ImageOutput, ResourceOrigin, ImageCategory
|
||||
|
||||
@invocation("resize")
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -315,8 +287,8 @@ new Invocation ready to be used.
|
||||
|
||||
Once you've created a Node, the next step is to share it with the community! The
|
||||
best way to do this is to submit a Pull Request to add the Node to the
|
||||
[Community Nodes](../nodes/communityNodes.md) list. If you're not sure how to do that,
|
||||
take a look at our [contributing nodes overview](../nodes/contributingNodes.md).
|
||||
[Community Nodes](nodes/communityNodes) list. If you're not sure how to do that,
|
||||
take a look at our [contributing nodes overview](contributingNodes).
|
||||
|
||||
## Advanced
|
||||
|
||||
|
||||
@@ -239,7 +239,7 @@ Consult the
|
||||
get it set up.
|
||||
|
||||
Suggest using VSCode's included settings sync so that your remote dev host has
|
||||
all the same app settings and extensions automatically.
|
||||
all the same app settings and extensions automagically.
|
||||
|
||||
##### One remote dev gotcha
|
||||
|
||||
|
||||
@@ -9,20 +9,20 @@ model. These are the:
|
||||
configuration information. Among other things, the record service
|
||||
tracks the type of the model, its provenance, and where it can be
|
||||
found on disk.
|
||||
|
||||
|
||||
* _ModelInstallServiceBase_ A service for installing models to
|
||||
disk. It uses `DownloadQueueServiceBase` to download models and
|
||||
their metadata, and `ModelRecordServiceBase` to store that
|
||||
information. It is also responsible for managing the InvokeAI
|
||||
`models` directory and its contents.
|
||||
|
||||
|
||||
* _DownloadQueueServiceBase_
|
||||
A multithreaded downloader responsible
|
||||
for downloading models from a remote source to disk. The download
|
||||
queue has special methods for downloading repo_id folders from
|
||||
Hugging Face, as well as discriminating among model versions in
|
||||
Civitai, but can be used for arbitrary content.
|
||||
|
||||
|
||||
* _ModelLoadServiceBase_
|
||||
Responsible for loading a model from disk
|
||||
into RAM and VRAM and getting it ready for inference.
|
||||
@@ -207,9 +207,9 @@ for use in the InvokeAI web server. Its signature is:
|
||||
|
||||
```
|
||||
def open(
|
||||
cls,
|
||||
config: InvokeAIAppConfig,
|
||||
conn: Optional[sqlite3.Connection] = None,
|
||||
cls,
|
||||
config: InvokeAIAppConfig,
|
||||
conn: Optional[sqlite3.Connection] = None,
|
||||
lock: Optional[threading.Lock] = None
|
||||
) -> Union[ModelRecordServiceSQL, ModelRecordServiceFile]:
|
||||
```
|
||||
@@ -265,7 +265,7 @@ If the key is unrecognized, this call raises an
|
||||
|
||||
#### exists(key) -> bool
|
||||
|
||||
Returns True if a model with the given key exists in the database.
|
||||
Returns True if a model with the given key exists in the databsae.
|
||||
|
||||
#### search_by_path(path) -> AnyModelConfig
|
||||
|
||||
@@ -363,7 +363,7 @@ functionality:
|
||||
|
||||
* Registering a model config record for a model already located on the
|
||||
local filesystem, without moving it or changing its path.
|
||||
|
||||
|
||||
* Installing a model already located on the local filesystem, by
|
||||
moving it into the InvokeAI root directory under the
|
||||
`models` folder (or wherever config parameter `models_dir`
|
||||
@@ -371,21 +371,21 @@ functionality:
|
||||
|
||||
* Probing of models to determine their type, base type and other key
|
||||
information.
|
||||
|
||||
|
||||
* Interface with the InvokeAI event bus to provide status updates on
|
||||
the download, installation and registration process.
|
||||
|
||||
|
||||
* Downloading a model from an arbitrary URL and installing it in
|
||||
`models_dir`.
|
||||
|
||||
* Special handling for HuggingFace repo_ids to recursively download
|
||||
the contents of the repository, paying attention to alternative
|
||||
variants such as fp16.
|
||||
|
||||
|
||||
* Saving tags and other metadata about the model into the invokeai database
|
||||
when fetching from a repo that provides that type of information
|
||||
(currently only HuggingFace).
|
||||
|
||||
|
||||
### Initializing the installer
|
||||
|
||||
A default installer is created at InvokeAI api startup time and stored
|
||||
@@ -461,7 +461,7 @@ revision.
|
||||
`config` is an optional dict of values that will override the
|
||||
autoprobed values for model type, base, scheduler prediction type, and
|
||||
so forth. See [Model configuration and
|
||||
probing](#model-configuration-and-probing) for details.
|
||||
probing](#Model-configuration-and-probing) for details.
|
||||
|
||||
`access_token` is an optional access token for accessing resources
|
||||
that need authentication.
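Continuing the example style used below (assuming `installer` and `URLModelSource` are in scope as in the larger example; the URL and override key here are illustrative, not a complete reference):

```python
source = URLModelSource(
    url="https://example.com/models/my-model.safetensors",  # placeholder URL
    access_token="<your token>",                            # for resources that need authentication
)
job = installer.import_model(
    source,
    config={"prediction_type": "v_prediction"},             # example override of an autoprobed value
)
```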
|
||||
@@ -494,7 +494,7 @@ source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac
|
||||
|
||||
for source in [source1, source2, source3, source4, source5, source6, source7]:
|
||||
install_job = installer.install_model(source)
|
||||
|
||||
|
||||
source2job = installer.wait_for_installs(timeout=120)
|
||||
for source in sources:
|
||||
job = source2job[source]
|
||||
@@ -504,7 +504,7 @@ for source in sources:
|
||||
print(f"{source} installed as {model_key}")
|
||||
elif job.errored:
|
||||
print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
|
||||
|
||||
|
||||
```
|
||||
|
||||
As shown here, the `import_model()` method accepts a variety of
|
||||
@@ -718,7 +718,7 @@ When downloading remote models is implemented, additional
|
||||
configuration information, such as list of trigger terms, will be
|
||||
retrieved from the HuggingFace and Civitai model repositories.
|
||||
|
||||
The probed values can be overridden by providing a dictionary in the
|
||||
The probed values can be overridden by providing a dictionary in the
|
||||
optional `config` argument passed to `import_model()`. You may provide
|
||||
overriding values for any of the model's configuration
|
||||
attributes. Here is an example of setting the
|
||||
@@ -841,7 +841,7 @@ variable.
|
||||
|
||||
#### installer.start(invoker)
|
||||
|
||||
The `start` method is called by the API initialization routines when
|
||||
The `start` method is called by the API intialization routines when
|
||||
the API starts up. Its effect is to call `sync_to_config()` to
|
||||
synchronize the model record store database with what's currently on
|
||||
disk.
|
||||
@@ -1364,6 +1364,7 @@ the in-memory loaded model:
|
||||
|----------------|-----------------|------------------|
|
||||
| `config` | AnyModelConfig | A copy of the model's configuration record for retrieving base type, etc. |
|
||||
| `model` | AnyModel | The instantiated model (details below) |
|
||||
| `locker` | ModelLockerBase | A context manager that mediates the movement of the model into VRAM |
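A hedged sketch of how those three attributes fit together in use (`loader`, `key`, and `run_inference` are placeholders, and the exact call pattern may differ):

```python
def generate_with(loader, key, run_inference):
    loaded = loader.get_model_by_key(key)  # returns a LoadedModel, per the table above
    config = loaded.config                 # the model's configuration record (base type, etc.)
    with loaded.locker:                    # the locker keeps the weights in VRAM for this block
        return run_inference(loaded.model, config)
```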
|
||||
|
||||
### get_model_by_key(key, [submodel]) -> LoadedModel
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# InvokeAI Backend Tests
|
||||
|
||||
We use `pytest` to run the backend python tests. (See [pyproject.toml](https://github.com/invoke-ai/InvokeAI/blob/main/pyproject.toml) for the default `pytest` options.)
|
||||
We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
|
||||
|
||||
## Fast vs. Slow
|
||||
All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
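For example (a minimal sketch):

```python
import pytest


def test_resize_math():
    # No annotation: part of the default 'fast' suite.
    assert 512 * 2 == 1024


@pytest.mark.slow
def test_full_model_load():
    # Annotated 'slow': only runs when slow tests are explicitly selected.
    ...
```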
|
||||
@@ -33,7 +33,7 @@ pytest tests -m ""
|
||||
|
||||
## Test Organization
|
||||
|
||||
All backend tests are in the [`tests/`](https://github.com/invoke-ai/InvokeAI/tree/main/tests) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
|
||||
All backend tests are in the [`tests/`](/tests/) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
|
||||
|
||||
TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true.
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## **What do I need to know to help?**
|
||||
|
||||
If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
|
||||
If you are looking to help to with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
|
||||
|
||||
|
||||
## **Get Started**
|
||||
@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
|
||||
Once you're setup, for more information, you can review the documentation specific to your area of interest:
|
||||
|
||||
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
|
||||
* #### [Frontend Documentation](../frontend/index.md)
|
||||
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
|
||||
* #### [Node Documentation](../INVOCATIONS.md)
|
||||
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
|
||||
|
||||
@@ -20,15 +20,15 @@ Once you're setup, for more information, you can review the documentation specif
|
||||
|
||||
If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md), [translation](translation.md) or helping support other users and triage issues as they're reported in GitHub.
|
||||
|
||||
There are two paths to making a development contribution:
|
||||
There are two paths to making a development contribution:
|
||||
|
||||
1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
|
||||
1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
|
||||
1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
|
||||
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**
|
||||
|
||||
*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors time and effort and want to ensure that no one’s time is being misspent.*
|
||||
|
||||
## Best Practices:
|
||||
## Best Practices:
|
||||
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
|
||||
* Comments! Commenting your code helps reviewers easily understand your contribution
|
||||
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
|
||||
@@ -38,7 +38,7 @@ There are two paths to making a development contribution:
|
||||
|
||||
If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.
|
||||
|
||||
For frontend related work, **@psychedelicious** is the best person to reach out to.
|
||||
For frontend related work, **@psychedelicious** is the best person to reach out to.
|
||||
|
||||
For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@psychedelicious**.
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ If you're a new contributor to InvokeAI or Open Source Projects, this is the gui
|
||||
## New Contributor Checklist
|
||||
|
||||
- [x] Set up your local development environment & fork of InvokeAI by following [the steps outlined here](../dev-environment.md)
|
||||
- [x] Set up your local tooling with [this guide](../LOCAL_DEVELOPMENT.md). Feel free to skip this step if you already have tooling you're comfortable with.
|
||||
- [x] Set up your local tooling with [this guide](InvokeAI/contributing/LOCAL_DEVELOPMENT/#developing-invokeai-in-vscode). Feel free to skip this step if you already have tooling you're comfortable with.
|
||||
- [x] Familiarize yourself with [Git](https://www.atlassian.com/git) & our project structure by reading through the [development documentation](development.md)
|
||||
- [x] Join the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord
|
||||
- [x] Choose an issue to work on! This can be achieved by asking in the #dev-chat channel, tackling a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) or finding an item on the [roadmap](https://github.com/orgs/invoke-ai/projects/7). If nothing in any of those places catches your eye, feel free to work on something of interest to you!
|
||||
@@ -22,15 +22,15 @@ Before starting these steps, ensure you have your local environment [configured
|
||||
2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
|
||||
3. Clone the repository to your local machine using:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/your-GitHub-username/InvokeAI.git
|
||||
```
|
||||
```bash
|
||||
git clone https://github.com/your-GitHub-username/InvokeAI.git
|
||||
```
|
||||
|
||||
If you're unfamiliar with using Git through the command line, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.

4. Create a new branch for your fix using:
|
||||
|
||||
```bash
|
||||
git checkout -b branch-name-here
|
||||
```
|
||||
```bash
|
||||
git checkout -b branch-name-here
|
||||
```
|
||||
|
||||
5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
|
||||
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Tutorials
|
||||
|
||||
Tutorials help new & existing users expand their ability to use InvokeAI to the full extent of our features and services.
|
||||
Tutorials help new & existing users expand their abilty to use InvokeAI to the full extent of our features and services.
|
||||
|
||||
Currently, we have a set of tutorials available on our [YouTube channel](https://www.youtube.com/@invokeai), but as InvokeAI continues to evolve with new updates, we want to ensure that we are giving our users the resources they need to succeed.
|
||||
|
||||
@@ -8,4 +8,4 @@ Tutorials can be in the form of videos or article walkthroughs on a subject of y
|
||||
|
||||
## Contributing
|
||||
|
||||
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
|
||||
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
|
||||
@@ -16,7 +16,7 @@ We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contrib
|
||||
- @psychedelicious (Spencer Mabrito) - Web Team Leader
|
||||
- @joshistoast (Josh Corbett) - Web Development
|
||||
- @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
||||
- @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
|
||||
- @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||
- @sunija - Standalone version
|
||||
- @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
||||
- @ryanjdick (Ryan Dick) - Machine Learning & Training
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
# Dev Environment
|
||||
|
||||
To make changes to Invoke's backend, frontend or documentation, you'll need to set up a dev environment.
|
||||
To make changes to Invoke's backend, frontend, or documentation, you'll need to set up a dev environment.
|
||||
|
||||
If you only want to make changes to the docs site, you can skip the frontend dev environment setup as described in the below guide.
|
||||
If you just want to use Invoke, you should use the [installer][installer link].
|
||||
|
||||
If you just want to use Invoke, you should use the [launcher][launcher link].
|
||||
!!! info "Why do I need the frontend toolchain?"
|
||||
|
||||
The repo doesn't contain a build of the frontend. You'll be responsible for rebuilding it every time you pull in new changes, or run it in dev mode (which incurs a substantial performance penalty).
|
||||
|
||||
!!! warning
|
||||
|
||||
@@ -15,76 +17,81 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
|
||||
## Setup
|
||||
|
||||
1. Run through the [requirements][requirements link].
|
||||
1. [Fork and clone][forking link] the [InvokeAI repo][repo link].
|
||||
1. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
|
||||
1. Create a python virtual environment inside the directory you just created:
|
||||
|
||||
2. [Fork and clone][forking link] the [InvokeAI repo][repo link].
|
||||
```sh
|
||||
python3 -m venv .venv --prompt InvokeAI-Dev
|
||||
```
|
||||
|
||||
3. This repository uses Git LFS to manage large files. To ensure all assets are downloaded:
|
||||
- Install git-lfs → [Download here](https://git-lfs.com/)
|
||||
- Enable automatic LFS fetching for this repository:
|
||||
```shell
|
||||
git config lfs.fetchinclude "*"
|
||||
```
|
||||
- Fetch files from LFS (only needs to be done once; subsequent `git pull` will fetch changes automatically):
|
||||
```
|
||||
git lfs pull
|
||||
```
|
||||
4. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
|
||||
1. Activate the venv (you'll need to do this every time you want to run the app):
|
||||
|
||||
5. Follow the [manual install][manual install link] guide, with some modifications to the install command:
|
||||
```sh
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
- Use `.` instead of `invokeai` to install from the current directory. You don't need to specify the version.
|
||||
1. Install the repo as an [editable install][editable install link]:
|
||||
|
||||
- Add `-e` after the `install` operation to make this an [editable install][editable install link]. That means your changes to the python code will be reflected when you restart the Invoke server.
|
||||
```sh
|
||||
pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
|
||||
```
|
||||
|
||||
- When installing the `invokeai` package, add the `dev`, `test` and `docs` package options to the package specifier. You may or may not need the `xformers` option - follow the manual install guide to figure that out. So, your package specifier will be either `".[dev,test,docs]"` or `".[dev,test,docs,xformers]"`. Note the quotes!
|
||||
Refer to the [manual installation][manual install link] instructions for help determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
|
||||
|
||||
With the modifications made, the install command should look something like this:
|
||||
1. Install the frontend dev toolchain:
|
||||
|
||||
```sh
|
||||
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
|
||||
```
|
||||
- [`nodejs`](https://nodejs.org/) (recommend v20 LTS)
|
||||
- [`pnpm`](https://pnpm.io/installation#installing-a-specific-version) (must be v8 - not v9!)
|
||||
|
||||
6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
|
||||
1. Do a production build of the frontend:
|
||||
|
||||
This is because the UI build is not distributed with the source code. You need to build it manually. End the running server instance.
|
||||
```sh
|
||||
pnpm build
|
||||
```
|
||||
|
||||
If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.
|
||||
1. Start the application:
|
||||
|
||||
7. Install the frontend dev toolchain, paying attention to versions:
|
||||
```sh
|
||||
python scripts/invokeai-web.py
|
||||
```
|
||||
|
||||
- [`nodejs`](https://nodejs.org/) (tested on LTS, v22)
|
||||
|
||||
- [`pnpm`](https://pnpm.io/installation) (tested on v10)
|
||||
|
||||
8. Do a production build of the frontend:
|
||||
|
||||
```sh
|
||||
cd <PATH_TO_INVOKEAI_REPO>/invokeai/frontend/web
|
||||
pnpm i
|
||||
pnpm build
|
||||
```
|
||||
|
||||
9. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes.
|
||||
1. Access the UI at `localhost:9090`.
|
||||
|
||||
## Updating the UI
|
||||
|
||||
You'll need to run `pnpm build` every time you pull in new changes.
|
||||
|
||||
Another option is to skip the build and instead run the UI in dev mode:
|
||||
You'll need to run `pnpm build` every time you pull in new changes. Another option is to skip the build and instead run the app in dev mode:
|
||||
|
||||
```sh
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
This starts a vite dev server for the UI at `127.0.0.1:5173`, which you will use instead of `127.0.0.1:9090`.
|
||||
This starts a dev server at `localhost:5173`, which you will use instead of `localhost:9090`.
|
||||
|
||||
The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out. It will hot-reload the UI as you make changes to the frontend code. Sometimes the hot-reload doesn't work, and you need to manually refresh the browser tab.
|
||||
The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out.
|
||||
|
||||
## Documentation
|
||||
|
||||
The documentation is built with `mkdocs`. It provides a hot-reload dev server for the docs. Start it with `mkdocs serve`.
|
||||
The documentation is built with `mkdocs`. To preview it locally, you need an additional set of packages installed.
|
||||
|
||||
[launcher link]: ../installation/quick_start.md
|
||||
```sh
|
||||
# after activating the venv
|
||||
pip install -e ".[docs]"
|
||||
```
|
||||
|
||||
Then, you can start a live docs dev server, which will auto-refresh when you edit the docs:
|
||||
|
||||
```sh
|
||||
mkdocs serve
|
||||
```
|
||||
|
||||
On macOS and Linux, there is a `make` target for this:
|
||||
|
||||
```sh
|
||||
make docs
|
||||
```
|
||||
|
||||
[installer link]: ../installation/installer.md
|
||||
[forking link]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
|
||||
[requirements link]: ../installation/requirements.md
|
||||
[repo link]: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
@@ -34,11 +34,11 @@ Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy)
|
||||
|
||||
## Contributors
|
||||
|
||||
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort.
|
||||
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
|
||||
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
|
||||
|
||||
By making a contribution to this project, you certify that:
|
||||
|
||||
|
||||
docs/faq.md
@@ -1,18 +1,26 @@
|
||||
# FAQ
|
||||
|
||||
!!! info "How to Reinstall"
|
||||
|
||||
Many issues can be resolved by re-installing the application. You won't lose any data by re-installing. We suggest downloading the [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) and using it to re-install the application. Consult the [installer guide](./installation/installer.md) for more information.
|
||||
|
||||
When you run the installer, you'll have an option to select the version to install. If you aren't ready to upgrade, you can choose the current version to fix a broken install.
|
||||
|
||||
If the troubleshooting steps on this page don't get you up and running, please either [create an issue] or hop on [discord] for help.
|
||||
|
||||
## How to Install
|
||||
|
||||
Follow the [Quick Start guide](./installation/quick_start.md) to install Invoke.
|
||||
You can download the latest installers [here](https://github.com/invoke-ai/InvokeAI/releases).
|
||||
|
||||
Note that any releases marked as _pre-release_ are in a beta state. You may experience some issues, but we appreciate your help testing those! For stable/reliable installations, please install the [latest release].
|
||||
|
||||
## Downloading models and using existing models
|
||||
|
||||
The Model Manager tab in the UI provides a few ways to install models, including using your already-downloaded models. You'll see a popup directing you there on first startup. For more information, see the [model install docs].
|
||||
|
||||
## Missing models after updating from v3
|
||||
## Missing models after updating to v4
|
||||
|
||||
If you find some models are missing after updating from v3, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
|
||||
If you find some models are missing after updating to v4, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
|
||||
|
||||
You can use the `Scan Folder` tab in the Model Manager UI to fix this. The models will either be in the old, now-unused `autoimport` folder, or your `models` folder.
|
||||
|
||||
@@ -29,27 +37,115 @@ Follow the same steps to scan and import the missing models.
|
||||
## Slow generation
|
||||
|
||||
- Check the [system requirements] to ensure that your system is capable of generating images.
|
||||
- Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
|
||||
- Check that your generations are happening on your GPU (if you have one). Invoke will log what is being used for generation upon startup. If your GPU isn't used, re-install and ensure you select the appropriate GPU option.
|
||||
- If you are on Windows with an Nvidia GPU, you may have exceeded your GPU's VRAM capacity and are triggering Nvidia's "sysmem fallback". There's a guide to opt out of this behaviour in the [Low-VRAM mode guide](./features/low-vram.md).
|
||||
- Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest to not set this at all and instead let Invoke manage it.
|
||||
- Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs with less than 16GB of VRAM. Even on a 24GB card, the default works well.
|
||||
- Check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup. If your GPU isn't used, re-install to ensure the correct versions of torch get installed.
|
||||
- If you are on Windows, you may have exceeded your GPU's VRAM capacity and are using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behaviour in the linked FAQ entry.
|
||||
|
||||
## Shared GPU Memory (Windows)
|
||||
|
||||
!!! tip "Nvidia GPUs with driver 536.40"
|
||||
|
||||
This only applies to current Nvidia cards with driver 536.40 or later, released in June 2023.
|
||||
|
||||
When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM.
|
||||
|
||||
When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash.
|
||||
|
||||
If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490).
|
||||
|
||||
Here's how to get the python path required in the linked guide:
|
||||
|
||||
- Run `invoke.bat`.
|
||||
- Select option 2 for developer console.
|
||||
- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first).
|
||||
|
||||
## Installer cannot find python (Windows)
|
||||
|
||||
Ensure that you checked **Add python.exe to PATH** when installing Python. This can be found at the bottom of the Python Installer window. If you already have Python installed, you can re-run the python installer, choose the Modify option and check the box.
|
||||
|
||||
## Triton error on startup
|
||||
|
||||
This can be safely ignored. Invoke doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
|
||||
This can be safely ignored. InvokeAI doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
|
||||
|
||||
## Unable to Copy on Firefox
|
||||
## Updated to 3.4.0 and xformers can’t load C++/CUDA
|
||||
|
||||
Firefox does not allow Invoke to directly access the clipboard by default. As a result, you may be unable to use certain copy functions. You can fix this by configuring Firefox to allow access to write to the clipboard:
|
||||
An issue occurred with your PyTorch update. Follow these steps to fix it:
|
||||
|
||||
- Go to `about:config` and click the Accept button
|
||||
- Search for `dom.events.asyncClipboard.clipboardItem`
|
||||
- Set it to `true` by clicking the toggle button
|
||||
- Restart Firefox
|
||||
1. Launch your invoke.bat / invoke.sh and select the option to open the developer console
|
||||
2. Run: `pip install ".[xformers]" --upgrade --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121`
|
||||
- If you run into an error with `typing_extensions`, re-open the developer console and run: `pip install -U typing-extensions`
|
||||
|
||||
Note that v3.4.0 is an old, unsupported version. Please upgrade to the [latest release].
|
||||
|
||||
## Install failed and says `pip` is out of date
|
||||
|
||||
An out of date `pip` typically won't cause an installation to fail. The cause of the error can likely be found above the message that says `pip` is out of date.
|
||||
|
||||
If you saw that warning but the install went well, don't worry about it (but you can update `pip` afterwards if you'd like).
|
||||
|
||||
## Replicate image found online
|
||||
|
||||
Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.
|
||||
|
||||
## OSErrors on Windows while installing dependencies
|
||||
|
||||
During a zip file installation or an update, installation stops with an `OSError`.
|
||||
|
||||
|
||||
|
||||
To resolve this, re-install the application as described above.
|
||||
|
||||
## HuggingFace install failed due to invalid access token
|
||||
|
||||
Some HuggingFace models require you to authenticate using an [access token].
|
||||
|
||||
Invoke doesn't manage this token for you, but it's easy to set it up:
|
||||
|
||||
- Follow the instructions in the link above to create an access token. Copy it.
|
||||
- Run the launcher script.
|
||||
- Select option 2 (developer console).
|
||||
- Paste the following command:
|
||||
|
||||
```sh
|
||||
python -c "import huggingface_hub; huggingface_hub.login()"
|
||||
```
|
||||
|
||||
- Paste your access token when prompted and press Enter. You won't see anything when you paste it.
|
||||
- Type `n` if prompted about git credentials.
|
||||
|
||||
If you get an error, try the command again - maybe the token didn't paste correctly.
|
||||
|
||||
Once your token is set, start Invoke and try downloading the model again. The installer will automatically use the access token.
|
||||
|
||||
If the install still fails, you may not have access to the model.
|
||||
|
||||
## Stable Diffusion XL generation fails after trying to load UNet
|
||||
|
||||
InvokeAI is working in other respects, but when trying to generate
|
||||
images with Stable Diffusion XL you get a "Server Error". The text log
|
||||
in the launch window contains this log line above several more lines of
|
||||
error messages:
|
||||
|
||||
`INFO --> Loading model:D:\LONG\PATH\TO\MODEL, type sdxl:main:unet`
|
||||
|
||||
This failure mode occurs when there is a network glitch during
|
||||
downloading the very large SDXL model.
|
||||
|
||||
To address this, first go to the Model Manager and delete the
|
||||
Stable-Diffusion-XL-base-1.X model. Then, click the HuggingFace tab,
|
||||
paste the Repo ID stabilityai/stable-diffusion-xl-base-1.0 and install
|
||||
the model.
|
||||
|
||||
## Package dependency conflicts during installation or update
|
||||
|
||||
If you have previously installed InvokeAI or another Stable Diffusion
|
||||
package, the installer may occasionally pick up outdated libraries and
|
||||
either the installer or `invoke` will fail with complaints about
|
||||
library conflicts.
|
||||
|
||||
To resolve this, re-install the application as described above.
|
||||
|
||||
## Invalid configuration file
|
||||
|
||||
Everything seems to install ok, you get a `ValidationError` when starting up the app.
|
||||
@@ -58,9 +154,64 @@ This is caused by an invalid setting in the `invokeai.yaml` configuration file.
|
||||
|
||||
Check the [configuration docs] for more detail about the settings and how to specify them.
|
||||
|
||||
## Out of Memory Errors
|
||||
## `ModuleNotFoundError: No module named 'controlnet_aux'`
|
||||
|
||||
The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Follow our [Low-VRAM mode guide](./features/low-vram.md) to configure Invoke to prevent these.
|
||||
`controlnet_aux` is a dependency of Invoke and appears to have been packaged or distributed strangely. Sometimes, it doesn't install correctly. This is outside our control.
|
||||
|
||||
If you encounter this error, the solution is to remove the package from the `pip` cache and re-run the Invoke installer so a fresh, working version of `controlnet_aux` can be downloaded and installed:
|
||||
|
||||
- Run the Invoke launcher
|
||||
- Choose the developer console option
|
||||
- Run this command: `pip cache remove controlnet_aux`
|
||||
- Close the terminal window
|
||||
- Download and run the [installer][latest release], selecting your current install location
|
||||
|
||||
## Out of Memory Issues
|
||||
|
||||
The models are large, VRAM is expensive, and you may find yourself
|
||||
faced with Out of Memory errors when generating images. Here are some
|
||||
tips to reduce the problem:
|
||||
|
||||
!!! info "Optimizing for GPU VRAM"
|
||||
|
||||
=== "4GB VRAM GPU"
|
||||
|
||||
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
|
||||
and derived models, provided that you do not use the NSFW checker. It won't be loaded unless you go into the UI settings and turn it on.
|
||||
|
||||
If you are on a CUDA-enabled GPU, we will automatically use xformers or torch-sdp to reduce VRAM requirements, though you can explicitly configure this. See the [configuration docs].
|
||||
|
||||
=== "6GB VRAM GPU"
|
||||
|
||||
This is a border case. Using the SD 1.5 series you should be able to
|
||||
generate images up to 640x640 with the NSFW checker enabled, and up to
|
||||
1024x1024 with it disabled.
|
||||
|
||||
If you run into persistent memory issues there are a series of
|
||||
environment variables that you can set before launching InvokeAI that
|
||||
alter how the PyTorch machine learning library manages memory. See
|
||||
<https://pytorch.org/docs/stable/notes/cuda.html#memory-management> for
|
||||
a list of these tweaks.
|
||||
|
||||
=== "12GB VRAM GPU"
|
||||
|
||||
This should be sufficient to generate larger images up to about 1280x1280.
|
||||
|
||||
## Checkpoint Models Load Slowly or Use Too Much RAM
|
||||
|
||||
The difference between diffusers models (a folder containing multiple
|
||||
subfolders) and checkpoint models (a file ending with .safetensors or
|
||||
.ckpt) is that InvokeAI is able to load diffusers models into memory
|
||||
incrementally, while checkpoint models must be loaded all at
|
||||
once. With very large models, or systems with limited RAM, you may
|
||||
experience slowdowns and other memory-related issues when loading
|
||||
checkpoint models.
|
||||
|
||||
To solve this, go to the Model Manager tab (the cube), select the
|
||||
checkpoint model that's giving you trouble, and press the "Convert"
|
||||
button in the upper right of your browser window. This will convert the
|
||||
checkpoint into a diffusers model, after which loading should be
|
||||
faster and less memory-intensive.
|
||||
|
||||
## Memory Leak (Linux)
|
||||
|
||||
@@ -102,6 +253,8 @@ Note the differences between memory allocated as chunks in an arena vs. memory a
|
||||
|
||||
[model install docs]: ./installation/models.md
|
||||
[system requirements]: ./installation/requirements.md
|
||||
[latest release]: https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||
[configuration docs]: ./configuration.md
|
||||
[access token]: https://huggingface.co/docs/hub/security-tokens#how-to-manage-user-access-tokens
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 72 KiB |
@@ -1,176 +0,0 @@
|
||||
---
|
||||
title: Low-VRAM mode
|
||||
---
|
||||
|
||||
As of v5.6.0, Invoke has a low-VRAM mode. It works on systems with dedicated GPUs (Nvidia GPUs on Windows/Linux and AMD GPUs on Linux).
|
||||
|
||||
This allows you to generate even if your GPU doesn't have enough VRAM to hold full models. Most users should be able to run even the beefiest models - like the ~24GB unquantised FLUX dev model.
|
||||
|
||||
## Enabling Low-VRAM mode
|
||||
|
||||
To enable Low-VRAM mode, add this line to your `invokeai.yaml` configuration file, then restart Invoke:
|
||||
|
||||
```yaml
|
||||
enable_partial_loading: true
|
||||
```
|
||||
|
||||
**Windows users should also [disable the Nvidia sysmem fallback](#disabling-nvidia-sysmem-fallback-windows-only)**.
|
||||
|
||||
It is possible to fine-tune the settings for best performance or if you still get out-of-memory errors (OOMs).
|
||||
|
||||
!!! tip "How to find `invokeai.yaml`"
|
||||
|
||||
The `invokeai.yaml` configuration file lives in your install directory. To access it, run the **Invoke Community Edition** launcher and click the install location. This will open your install directory in a file explorer window.
|
||||
|
||||
You'll see `invokeai.yaml` there and can edit it with any text editor. After making changes, restart Invoke.
|
||||
|
||||
If you don't see `invokeai.yaml`, launch Invoke once. It will create the file on its first startup.
|
||||
|
||||
## Details and fine-tuning
|
||||
|
||||
Low-VRAM mode involves five features, each of which can be configured or fine-tuned:
|
||||
|
||||
- Partial model loading (`enable_partial_loading`)
|
||||
- PyTorch CUDA allocator config (`pytorch_cuda_alloc_conf`)
|
||||
- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
|
||||
- Working memory (`device_working_mem_gb`)
|
||||
- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)
|
||||
|
||||
Read on to learn about these features and understand how to fine-tune them for your system and use-cases.
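For orientation, here is a combined `invokeai.yaml` sketch touching these settings. The values are purely illustrative - the sections below explain how (and whether) to choose them for your system:

```yaml
# Low-VRAM mode: partial loading is the key setting.
enable_partial_loading: true

# Optional fine-tuning, covered in the sections below.
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
max_cache_ram_gb: 28 # defaults are chosen automatically; override only if needed
device_working_mem_gb: 4 # bump up from the 3GB default if you still see OOMs
keep_ram_copy_of_weights: false # only if RAM is tight
# max_cache_vram_gb is deliberately omitted - most users should not set it.
```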
|
||||
|
||||
### Partial model loading
|
||||
|
||||
Invoke's partial model loading works by streaming model "layers" between RAM and VRAM as they are needed.
|
||||
|
||||
When an operation needs layers that are not in VRAM, but there isn't enough room to load them, inactive layers are offloaded to RAM to make room.
|
||||
|
||||
#### Enabling partial model loading
|
||||
|
||||
As described above, you can enable partial model loading by adding this line to `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
enable_partial_loading: true
|
||||
```
|
||||
|
||||
### PyTorch CUDA allocator config
|
||||
|
||||
The PyTorch CUDA allocator's behavior can be configured using the `pytorch_cuda_alloc_conf` config. Tuning the allocator configuration can help to reduce the peak reserved VRAM. The optimal configuration is dependent on many factors (e.g. device type, VRAM, CUDA driver version, etc.), but switching from PyTorch's native allocator to using CUDA's built-in allocator works well on many systems. To try this, add the following line to your `invokeai.yaml` file:
|
||||
|
||||
```yaml
|
||||
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
|
||||
```
|
||||
|
||||
A more complete explanation of the available configuration options is [here](https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf).
|
||||
|
||||
### Dynamic RAM and VRAM cache sizes
|
||||
|
||||
Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum.
|
||||
|
||||
By default, Invoke manages these caches' sizes dynamically for best performance.
|
||||
|
||||
#### Fine-tuning cache sizes
|
||||
|
||||
Prior to v5.6.0, the cache sizes were static, and for best performance, many users needed to manually fine-tune the `ram` and `vram` settings in `invokeai.yaml`.
|
||||
|
||||
As of v5.6.0, the caches are dynamically sized. The `ram` and `vram` settings are no longer used, and new settings are added to configure the cache.
|
||||
|
||||
**Most users will not need to fine-tune the cache sizes.**
|
||||
|
||||
But, if your GPU has enough VRAM to hold models fully, you might get a perf boost by manually setting the cache sizes in `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
# The default max cache RAM size is logged on InvokeAI startup. It is determined based on your system RAM / VRAM.
|
||||
# You can override the default value by setting `max_cache_ram_gb`.
|
||||
# Increasing `max_cache_ram_gb` will increase the amount of RAM used to cache inactive models, resulting in faster model
|
||||
# reloads for the cached models.
|
||||
# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB
|
||||
# might be a good value to achieve aggressive model caching.
|
||||
max_cache_ram_gb: 28
|
||||
|
||||
# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
|
||||
# consideration the VRAM used by other processes).
|
||||
# You can override the default value by setting `max_cache_vram_gb`.
|
||||
# CAUTION: Most users should not manually set this value. See warning below.
|
||||
max_cache_vram_gb: 16
|
||||
```
|
||||
|
||||
!!! warning "Max safe value for `max_cache_vram_gb`"
|
||||
|
||||
Most users should not manually configure the `max_cache_vram_gb`. This configuration value takes precedence over the `device_working_mem_gb` and any operations that explicitly reserve additional working memory (e.g. VAE decode). As such, manually configuring it increases the likelihood of encountering out-of-memory errors.
|
||||
|
||||
For users who wish to configure `max_cache_vram_gb`, the max safe value can be determined by subtracting `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.
|
||||
|
||||
For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.
|
||||
|
||||
If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.
|
||||
|
||||
Most users who override `max_cache_vram_gb` are doing so because they wish to use significantly less VRAM, and should be setting `max_cache_vram_gb` to a value significantly less than the 'max safe value'.
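As a concrete sketch of the 12GB example above (illustrative only, and only for users who have a reason to override the default):

```yaml
# 12GB GPU with the default 3GB of working memory:
# 12GB - 3GB = 9GB is the max safe VRAM cache size.
device_working_mem_gb: 3
max_cache_vram_gb: 9
```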
|
||||
|
||||
### Working memory
|
||||
|
||||
Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.
|
||||
|
||||
Invoke reserves 3GB VRAM as working memory by default, which is enough for most use-cases. However, it is possible to fine-tune this setting if you still get OOMs.
|
||||
|
||||
#### Fine-tuning working memory
|
||||
|
||||
You can increase the working memory size in `invokeai.yaml` to prevent OOMs:
|
||||
|
||||
```yaml
|
||||
# The default is 3GB - bump it up to 4GB to prevent OOMs.
|
||||
device_working_mem_gb: 4
|
||||
```
|
||||
|
||||
!!! tip "Operations may request more working memory"
|
||||
|
||||
For some operations, we can determine VRAM requirements in advance and allocate additional working memory to prevent OOMs.
|
||||
|
||||
VAE decoding is one such operation. This operation converts the generation process's output into an image. For large image outputs, this might use more than the default working memory size of 3GB.
|
||||
|
||||
During this decoding step, Invoke calculates how much VRAM will be required to decode and requests that much VRAM from the model manager. If the amount exceeds the working memory size, the model manager will offload cached model layers from VRAM until there's enough VRAM to decode.
|
||||
|
||||
Once decoding completes, the model manager "reclaims" the extra VRAM allocated as working memory for future model loading operations.
|
||||
|
||||
### Keeping a RAM weight copy
|
||||
|
||||
Invoke has the option of keeping a RAM copy of all model weights, even when they are loaded onto the GPU. This optimization is _on_ by default, and enables faster model switching and LoRA patching. Disabling this feature will reduce the average RAM load while running Invoke (peak RAM likely won't change), at the cost of slower model switching and LoRA patching. If you have limited RAM, you can disable this optimization:
|
||||
|
||||
```yaml
|
||||
# Set to false to reduce the average RAM usage at the cost of slower model switching and LoRA patching.
|
||||
keep_ram_copy_of_weights: false
|
||||
```
|
||||
|
||||
### Disabling Nvidia sysmem fallback (Windows only)
|
||||
|
||||
On Windows, Nvidia GPUs are able to use system RAM when their VRAM fills up via **sysmem fallback**. While it sounds like a good idea on the surface, in practice it causes massive slowdowns during generation.
|
||||
|
||||
It is strongly suggested to disable this feature:
|
||||
|
||||
- Open the **NVIDIA Control Panel** app.
|
||||
- Expand **3D Settings** on the left panel.
|
||||
- Click **Manage 3D Settings** in the left panel.
|
||||
- Find **CUDA - Sysmem Fallback Policy** in the right panel and set it to **Prefer No Sysmem Fallback**.
|
||||
|
||||

|
||||
|
||||
!!! tip "Invoke does the same thing, but better"
|
||||
|
||||
If the sysmem fallback feature sounds familiar, that's because Invoke's partial model loading strategy is conceptually very similar - use VRAM when there's room, else fall back to RAM.
|
||||
|
||||
Unfortunately, the Nvidia implementation is not optimized for applications like Invoke and does more harm than good.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Windows page file
|
||||
|
||||
Invoke has high virtual memory (a.k.a. 'committed memory') requirements. This can cause issues on Windows if the page file size limits are hit. (See this issue for the technical details on why this happens: https://github.com/invoke-ai/InvokeAI/issues/7563).
|
||||
|
||||
If you run out of page file space, InvokeAI may crash. Often, these crashes will happen with one of the following errors:
|
||||
|
||||
- InvokeAI exits with Windows error code `3221225477`
|
||||
- InvokeAI crashes without an error, but `eventvwr.msc` reveals an error with code `0xc0000005` (the hex equivalent of `3221225477`)
|
||||
|
||||
If you are running out of page file space, try the following solutions:
|
||||
|
||||
- Make sure that you have sufficient disk space for the page file to grow. Watch your disk usage as Invoke runs. If it climbs near 100% leading up to the crash, then this is very likely the source of the issue. Clear out some disk space to resolve the issue.
|
||||
- Make sure that your page file is set to "System managed size" (this is the default) rather than a custom size. Under the "System managed size" policy, the page file will grow dynamically as needed.
|
||||
@@ -50,9 +50,11 @@ title: Invoke
|
||||
|
||||
## Installation
|
||||
|
||||
The [Invoke Launcher](installation/quick_start.md) is the easiest way to install, update and run Invoke on Windows, macOS and Linux.
|
||||
The [installer script](installation/installer.md) is the easiest way to install and update the application.
|
||||
|
||||
You can also install Invoke as a [python package](installation/manual.md) or with [docker](installation/docker.md).
|
||||
You can also install Invoke as python package [via PyPI](installation/manual.md) or [docker](installation/docker.md).
|
||||
|
||||
See the [installation section](./installation/index.md) for more information.
|
||||
|
||||
## Help
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ title: Docker
|
||||
|
||||
!!! warning "macOS users"
|
||||
|
||||
Docker can not access the GPU on macOS, so your generation speeds will be slow. Use the [launcher](./quick_start.md) instead.
|
||||
Docker can not access the GPU on macOS, so your generation speeds will be slow. Use the [installer](./installer.md) instead.
|
||||
|
||||
!!! tip "Linux and Windows Users"
|
||||
|
||||
|
||||
36
docs/installation/index.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Installation and Updating Overview
|
||||
|
||||
Before installing, review the [installation requirements](./requirements.md) to ensure your system is set up properly.
|
||||
|
||||
See the [FAQ](../faq.md) for frequently-encountered installation issues.
|
||||
|
||||
If you need more help, join our [discord](https://discord.gg/ZmtBAhwWhy) or [create a GitHub issue](https://github.com/invoke-ai/InvokeAI/issues).
|
||||
|
||||
## Automated Installer & Updates
|
||||
|
||||
✅ The automated [installer](./installer.md) is the best way to install Invoke.
|
||||
|
||||
⬆️ The same installer is also the best way to update Invoke - simply rerun it for the same folder you installed to.
|
||||
|
||||
The installation process only manages the core libraries and application dependencies needed to run Invoke.
|
||||
|
||||
Models, images, or other assets in the Invoke root folder won't be affected by the installation process.
|
||||
|
||||
## Manual Install
|
||||
|
||||
If you are familiar with python and want more control over the packages that are installed, you can [install Invoke manually via PyPI](./manual.md).
|
||||
|
||||
Updates are managed by reinstalling the latest version from PyPI.
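For example, with your virtual environment active, an update might be as simple as the following sketch (see the manual install guide for the full, version-specific commands):

```sh
pip install --upgrade InvokeAI
```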
|
||||
|
||||
## Developer Install
|
||||
|
||||
If you want to contribute to InvokeAI, you'll need to set up a [dev environment](../contributing/dev-environment.md).
|
||||
|
||||
## Docker
|
||||
|
||||
Invoke publishes docker images. See the [docker installation guide](./docker.md) for details.
|
||||
|
||||
## Other Installation Guides
|
||||
|
||||
- [PyPatchMatch](./patchmatch.md)
|
||||
- [Installing Models](./models.md)
|
||||
115
docs/installation/installer.md
Normal file
@@ -0,0 +1,115 @@
|
||||
# Automatic Install & Updates
|
||||
|
||||
!!! tip "Use the installer to update"
|
||||
|
||||
Using the installer for updates will not erase any of your data (images, models, boards, etc). It only updates the core libraries used to run Invoke.
|
||||
|
||||
Simply use the same path you installed to originally to update your existing installation.
|
||||
|
||||
Both release and pre-release versions can be installed using the installer. It also supports installation from a wheel file if needed.
|
||||
|
||||
Be sure to review the [installation requirements] and ensure your system has everything it needs to install Invoke.
|
||||
|
||||
## Getting the Latest Installer
|
||||
|
||||
Download the `InvokeAI-installer-vX.Y.Z.zip` file from the [latest release] page. It is at the bottom of the page, under **Assets**.
|
||||
|
||||
After unzipping the installer, you should have an `InvokeAI-Installer` folder with some files inside, including `install.bat` and `install.sh`.
|
||||
|
||||
## Running the Installer
|
||||
|
||||
!!! tip
|
||||
|
||||
Windows users should first double-click the `WinLongPathsEnabled.reg` file to prevent a failed installation due to long file paths.
|
||||
|
||||
Double-click the install script:
|
||||
|
||||
=== "Windows"
|
||||
|
||||
```sh
|
||||
install.bat
|
||||
```
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```sh
|
||||
install.sh
|
||||
```
|
||||
|
||||
!!! info "Running the Installer from the commandline"
|
||||
|
||||
You can also run the install script from cmd/powershell (Windows) or terminal (Linux/macOS).
|
||||
|
||||
!!! warning "Untrusted Publisher (Windows)"
|
||||
|
||||
You may get a popup saying the file comes from an `Untrusted Publisher`. Click `More Info` and `Run Anyway` to get past this.
|
||||
|
||||
The installation process is simple, with a few prompts:
|
||||
|
||||
- Select the version to install. Unless you have a specific reason to install a specific version, select the default (the latest version).
|
||||
- Select location for the install. Be sure you have enough space in this folder for the base application, as described in the [installation requirements].
|
||||
- Select a GPU device.
|
||||
|
||||
!!! info "Slow Installation"
|
||||
|
||||
The installer needs to download several GB of data and install it all. It may appear to get stuck at 99.9% when installing `pytorch` or during a step labeled "Installing collected packages".
|
||||
|
||||
If it is stuck for over 10 minutes, something has probably gone wrong and you should close the window and restart.
|
||||
|
||||
## Running the Application
|
||||
|
||||
Find the install location you selected earlier. Double-click the launcher script to run the app:
|
||||
|
||||
=== "Windows"
|
||||
|
||||
```sh
|
||||
invoke.bat
|
||||
```
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```sh
|
||||
invoke.sh
|
||||
```
|
||||
|
||||
Choose the first option to run the UI. After a series of startup messages, you'll see something like this:
|
||||
|
||||
```sh
|
||||
Uvicorn running on http://127.0.0.1:9090 (Press CTRL+C to quit)
|
||||
```
|
||||
|
||||
Copy the URL into your browser and you should see the UI.
|
||||
|
||||
## Improved Outpainting with PatchMatch
|
||||
|
||||
PatchMatch is an extra add-on that can improve outpainting. Windows users are in luck - it works out of the box.
|
||||
|
||||
On macOS and Linux, a few extra steps are needed to set it up. See the [PatchMatch installation guide](./patchmatch.md).
|
||||
|
||||
## First-time Setup
|
||||
|
||||
You will need to [install some models] before you can generate.
|
||||
|
||||
Check the [configuration docs] for details on configuring the application.
|
||||
|
||||
## Updating
|
||||
|
||||
Updating is exactly the same as installing - download the latest installer, choose the latest version, enter your existing installation path, and the app will update. None of your data (images, models, boards, etc) will be erased.
|
||||
|
||||
!!! info "Dependency Resolution Issues"
|
||||
|
||||
We've found that pip's dependency resolution can cause issues when upgrading packages. One very common problem was pip "downgrading" torch from CUDA to CPU, but things broke in other novel ways.
|
||||
|
||||
The installer doesn't have this kind of problem, so we use it for updating as well.
|
||||
|
||||
## Installation Issues
|
||||
|
||||
If you have installation issues, please review the [FAQ]. You can also [create an issue] or ask for help on [discord].
|
||||
|
||||
[installation requirements]: ./requirements.md
|
||||
[FAQ]: ../faq.md
|
||||
[install some models]: ./models.md
|
||||
[configuration docs]: ../configuration.md
|
||||
[latest release]: https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||
@@ -4,11 +4,11 @@
|
||||
|
||||
**Python experience is mandatory.**
|
||||
|
||||
If you want to use Invoke locally, you should probably use the [launcher](./quick_start.md).
|
||||
If you want to use Invoke locally, you should probably use the [installer](./installer.md).
|
||||
|
||||
If you want to contribute to Invoke or run the app on the latest dev branch, instead follow the [dev environment](../contributing/dev-environment.md) guide.
|
||||
If you want to contribute to Invoke, instead follow the [dev environment](../contributing/dev-environment.md) guide.
|
||||
|
||||
InvokeAI is distributed as a python package on PyPI, installable with `pip`. There are a few things that are handled by the launcher that you'll need to manage manually, described in this guide.
|
||||
InvokeAI is distributed as a python package on PyPI, installable with `pip`. There are a few things that are handled by the installer and launcher that you'll need to manage manually, described in this guide.
|
||||
|
||||
## Requirements
|
||||
|
||||
@@ -16,39 +16,43 @@ Before you start, go through the [installation requirements](./requirements.md).
|
||||
|
||||
## Walkthrough
|
||||
|
||||
We'll use [`uv`](https://github.com/astral-sh/uv) to install python and create a virtual environment, then install the `invokeai` package. `uv` is a modern, very fast alternative to `pip`.
|
||||
|
||||
The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed.
|
||||
|
||||
1. Install `uv` as described in its [docs](https://docs.astral.sh/uv/getting-started/installation/#standalone-installer). We suggest using the standalone installer method.
|
||||
|
||||
Run `uv --version` to confirm that `uv` is installed and working. After installation, you may need to restart your terminal to get access to `uv`.
|
||||
|
||||
2. Create a directory for your installation, typically in your home directory (e.g. `~/invokeai` or `$Home/invokeai`):
|
||||
1. Create a directory to contain your InvokeAI library, configuration files, and models. This is known as the "runtime" or "root" directory, and typically lives in your home directory under the name `invokeai`.
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
mkdir ~/invokeai
|
||||
cd ~/invokeai
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```bash
|
||||
mkdir $Home/invokeai
|
||||
cd $Home/invokeai
|
||||
```
|
||||
|
||||
3. Create a virtual environment in that directory:
|
||||
1. Enter the root directory and create a virtual Python environment within it named `.venv`.
|
||||
|
||||
```sh
|
||||
uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv
|
||||
```
|
||||
!!! warning "Virtual Environment Location"
|
||||
|
||||
This command creates a portable virtual environment at `.venv` complete with a portable python 3.12. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything.
|
||||
While you may create the virtual environment anywhere in the file system, we recommend that you create it within the root directory as shown here. This allows the application to automatically detect its data directories.
|
||||
|
||||
4. Activate the virtual environment:
|
||||
If you choose a different location for the venv, then you _must_ set the `INVOKEAI_ROOT` environment variable or specify the root directory using the `--root` CLI arg.
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
cd ~/invokeai
|
||||
python3 -m venv .venv --prompt InvokeAI
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```bash
|
||||
cd $Home/invokeai
|
||||
python3 -m venv .venv --prompt InvokeAI
|
||||
```
|
||||
|
||||
1. Activate the new environment:
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
@@ -56,62 +60,41 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
=== "Windows"
|
||||
|
||||
```ps
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
5. Choose a version to install. Review the [GitHub releases page](https://github.com/invoke-ai/InvokeAI/releases).
|
||||
!!! info "Permissions Error (Windows)"
|
||||
|
||||
6. Determine the package specifier to use when installing. This determines whether the optional `xformers` extra is included and is purely a performance optimization.
|
||||
If you get a permissions error at this point, run this command and try again.
|
||||
|
||||
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
|
||||
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
|
||||
`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
|
||||
|
||||
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is achieved by using [uv's built-in torch support](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection).
|
||||
The command-line prompt should change to show `(InvokeAI)`, indicating the venv is active.
|
||||
|
||||
=== "Invoke v5.12 and later"
|
||||
1. Make sure that pip is installed in your virtual environment and up to date:
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
=== "Invoke v5.10.0 to v5.11.0"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
|
||||
- **In all other cases, do not specify a torch backend.**
|
||||
|
||||
=== "Invoke v5.0.0 to v5.9.1"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
|
||||
- **In all other cases, do not specify a torch backend.**
|
||||
|
||||
=== "Invoke v4"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
|
||||
- **In all other cases, do not specify a torch backend.**
|
||||
|
||||
8. Install the `invokeai` package. Substitute the package specifier and version.
|
||||
|
||||
```sh
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
|
||||
```bash
|
||||
python3 -m pip install --upgrade pip
|
||||
```
|
||||
|
||||
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
|
||||
1. Install the InvokeAI Package. The base command is `pip install InvokeAI --use-pep517`, but you may need to change this depending on your system and the desired features.
|
||||
|
||||
```sh
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<VERSION> --force-reinstall
|
||||
```
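As a worked example, installing on Windows with an Nvidia 30xx-series GPU and a recent Invoke release might look like the following (the version number is illustrative - substitute the one you chose above):

```sh
uv pip install invokeai==5.12.0 --python 3.12 --python-preference only-managed --torch-backend=cu128 --force-reinstall
```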
|
||||
- You may need to provide an [extra index URL](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-extra-index-url). Select your platform configuration using [this tool on the PyTorch website](https://pytorch.org/get-started/locally/). Copy the `--extra-index-url` string from this and append it to your install command.
|
||||
|
||||
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
|
||||
```
|
||||
|
||||
- If you have a CUDA GPU and want to install with `xformers`, you need to add an option to the package name. Note that `xformers` is not strictly necessary. PyTorch includes an implementation of the SDP attention algorithm with similar performance for most GPUs.
|
||||
|
||||
```bash
|
||||
pip install "InvokeAI[xformers]" --use-pep517
|
||||
```
|
||||
|
||||
1. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
@@ -119,31 +102,17 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
deactivate && source .venv/bin/activate
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
=== "Windows"
|
||||
|
||||
```ps
|
||||
deactivate
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
10. Run the application, specifying the directory you created earlier as the root directory:
|
||||
1. Run the application:
|
||||
|
||||
=== "Linux/macOS"
|
||||
Run `invokeai-web` to start the UI. You must activate the virtual environment before running the app.
|
||||
|
||||
```bash
|
||||
invokeai-web --root ~/invokeai
|
||||
```
|
||||
!!! warning
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```bash
|
||||
invokeai-web --root $Home/invokeai
|
||||
```
|
||||
|
||||
## Headless Install and Launch Scripts
|
||||
|
||||
If you run Invoke on a headless server, you might want to install and run Invoke on the command line.
|
||||
|
||||
We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](../installation/quick_start.md).
|
||||
|
||||
You can create your own scripts for this by copying the handful of commands in this guide. `uv`'s [`pip` interface docs](https://docs.astral.sh/uv/reference/cli/#uv-pip-install) may be useful.
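A minimal sketch of such a script, assuming a Linux server with an Nvidia GPU, an install under `~/invokeai`, and `uv` already installed (adjust the torch backend, version, and paths for your system):

```sh
#!/usr/bin/env bash
# Headless install-and-launch sketch based on the commands in this guide.
set -euo pipefail

mkdir -p ~/invokeai && cd ~/invokeai

# Create a portable venv with a managed Python 3.12, then activate it.
uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv
source .venv/bin/activate

# Install (or update) Invoke; pick the torch backend that matches your hardware.
uv pip install invokeai --python 3.12 --python-preference only-managed --torch-backend=cu128 --force-reinstall

# Start the UI, using this directory as the root.
invokeai-web --root ~/invokeai
```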
|
||||
If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
|
||||
|
||||
@@ -97,16 +97,16 @@ Prior to installing PyPatchMatch, you need to take the following steps:
|
||||
sudo pacman -S --needed base-devel
|
||||
```
|
||||
|
||||
2. Install `opencv`, `blas`, and required dependencies:
|
||||
2. Install `opencv` and `blas`:
|
||||
|
||||
```sh
|
||||
sudo pacman -S opencv blas fmt glew vtk hdf5
|
||||
sudo pacman -S opencv blas
|
||||
```
|
||||
|
||||
or for CUDA support
|
||||
|
||||
```sh
|
||||
sudo pacman -S opencv-cuda blas fmt glew vtk hdf5
|
||||
sudo pacman -S opencv-cuda blas
|
||||
```
|
||||
|
||||
3. Fix the naming of the `opencv` package configuration file:
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
# Invoke Community Edition Quick Start
|
||||
|
||||
Welcome to Invoke! Follow these steps to install, update, and get started creating.
|
||||
|
||||
## Step 1: System Requirements
|
||||
|
||||
Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
|
||||
|
||||
Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.
|
||||
|
||||
- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
|
||||
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.
|
||||
|
||||
!!! info "Hardware Requirements (Windows/Linux)"
|
||||
|
||||
=== "SD1.5 - 512×512"
|
||||
|
||||
- GPU: Nvidia 10xx series or later, 4GB+ VRAM.
|
||||
- Memory: At least 8GB RAM.
|
||||
- Disk: 10GB for base installation plus 30GB for models.
|
||||
|
||||
=== "SDXL - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 100GB for models.
|
||||
|
||||
=== "FLUX - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 10GB+ VRAM.
|
||||
- Memory: At least 32GB RAM.
|
||||
- Disk: 10GB for base installation plus 200GB for models.
|
||||
|
||||
More detail on system requirements can be found [here](./requirements.md).
|
||||
|
||||
## Step 2: Download and Set Up the Launcher
|
||||
|
||||
The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher.
|
||||
|
||||
!!! info "Instructions for each OS"
|
||||
|
||||
=== "Windows"
|
||||
|
||||
- [Download for Windows](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe)
|
||||
- Run the `EXE` to install the Launcher and start it.
|
||||
- A desktop shortcut will be created; use this to run the Launcher in the future.
|
||||
- You can delete the `EXE` file you downloaded.
|
||||
|
||||
=== "macOS"
|
||||
|
||||
- [Download for macOS](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg)
|
||||
- Open the `DMG` and drag the app into `Applications`.
|
||||
- Run the Launcher using its entry in `Applications`.
|
||||
- You can delete the `DMG` file you downloaded.
|
||||
|
||||
=== "Linux"
|
||||
|
||||
- [Download for Linux](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage)
|
||||
- You may need to edit the `AppImage` file properties and make it executable.
|
||||
- Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it.
|
||||
- Run the Launcher by double-clicking the `AppImage` or the shortcut you made.
|
||||
|
||||
## Step 3: Install Invoke
|
||||
|
||||
Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke.
|
||||
|
||||
If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.
|
||||
|
||||
!!! tip "Updating"
|
||||
|
||||
The Launcher will check for updates for itself _and_ Invoke.
|
||||
|
||||
- When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself.
|
||||
- When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke.
|
||||
|
||||
## Step 4: Launch
|
||||
|
||||
Once installed, click **Finish**, then **Launch** to start Invoke.
|
||||
|
||||
The very first run after an installation or update will take a few extra moments to get ready.
|
||||
|
||||
!!! tip "Server Mode"
|
||||
|
||||
The launcher runs Invoke as a desktop application. You can enable **Server Mode** in the launcher's settings to disable this and instead access the UI through your web browser.
|
||||
|
||||
## Step 5: Install Models
|
||||
|
||||
With Invoke started up, you'll need to install some models.
|
||||
|
||||
The quickest way to get started is to install a **Starter Model** bundle. If you already have a model collection, Invoke can use it.
|
||||
|
||||
!!! info "Install Models"
|
||||
|
||||
=== "Install a Starter Model bundle"
|
||||
|
||||
1. Go to the **Models** tab.
|
||||
2. Click **Starter Models** on the right.
|
||||
3. Click one of the bundles to install its models. Refer to the [system requirements](#step-1-system-requirements) if you're unsure which model architecture will work for your system.
|
||||
|
||||
=== "Use my model collection"
|
||||
|
||||
1. Go to the **Models** tab.
2. Click **Scan Folder** on the right.
3. Paste the path to your models collection and click **Scan Folder**.
4. With **In-place install** enabled, Invoke will leave the model files where they are. If you disable this, **Invoke will move the models into its own folders**.
|
||||
|
||||
You’re now ready to start creating!
|
||||
|
||||
## Step 6: Learn the Basics
|
||||
|
||||
We recommend watching our [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO). It covers essential features and workflows, including:
|
||||
|
||||
- Generating your first image.
|
||||
- Using control layers and reference guides.
|
||||
- Refining images with advanced workflows.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If installation fails, retrying the install in Repair Mode may fix it. There's a checkbox to enable this on the Review step of the install flow.
|
||||
|
||||
If that doesn't fix it, [clearing the `uv` cache](https://docs.astral.sh/uv/reference/cli/#uv-cache-clean) might do the trick:
|
||||
|
||||
- Open and start the dev console (button at the bottom-left of the launcher).
|
||||
- Run `uv cache clean`.
|
||||
- Retry the installation. Enable Repair Mode for good measure.
|
||||
|
||||
If you are still unable to install, try installing to a different location and see if that works.
|
||||
|
||||
If you still have problems, ask for help on the Invoke [discord](https://discord.gg/ZmtBAhwWhy).
|
||||
|
||||
## Other Installation Methods
|
||||
|
||||
- You can install the Invoke application as a python package. See our [manual install](./manual.md) docs.
|
||||
- You can run Invoke with docker. See our [docker install](./docker.md) docs.
|
||||
|
||||
## Need Help?
|
||||
|
||||
- Visit our [Support Portal](https://support.invoke.ai).
|
||||
- Watch the [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO).
|
||||
- Join the conversation on [Discord][discord link].
|
||||
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
@@ -1,35 +1,90 @@
|
||||
# Requirements
|
||||
|
||||
Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
|
||||
## GPU
|
||||
|
||||
## Hardware
|
||||
!!! warning "Problematic Nvidia GPUs"
|
||||
|
||||
Hardware requirements vary significantly depending on model and image output size.
|
||||
We do not recommend these GPUs. They cannot operate with half precision, but have insufficient VRAM to generate 512x512 images at full precision.
|
||||
|
||||
The requirements below are rough guidelines for best performance. GPUs with less VRAM typically still work, if a bit slower. Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
|
||||
- NVIDIA 10xx series cards such as the 1080 TI
|
||||
- GTX 1650 series cards
|
||||
- GTX 1660 series cards
|
||||
|
||||
- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
|
||||
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.
|
||||
Invoke runs best with a dedicated GPU, but will fall back to running on CPU, albeit much slower. You'll need a beefier GPU for SDXL.
|
||||
|
||||
!!! info "Hardware Requirements (Windows/Linux)"
|
||||
!!! example "Stable Diffusion 1.5"
|
||||
|
||||
=== "SD1.5 - 512×512"
|
||||
=== "Nvidia"
|
||||
|
||||
- GPU: Nvidia 10xx series or later, 4GB+ VRAM.
|
||||
- Memory: At least 8GB RAM.
|
||||
- Disk: 10GB for base installation plus 30GB for models.
|
||||
```
|
||||
Any GPU with at least 4GB VRAM.
|
||||
```
|
||||
|
||||
=== "SDXL - 1024×1024"
|
||||
=== "AMD"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 100GB for models.
|
||||
```
|
||||
Any GPU with at least 4GB VRAM. Linux only.
|
||||
```
|
||||
|
||||
=== "FLUX - 1024×1024"
|
||||
=== "Mac"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 10GB+ VRAM.
|
||||
- Memory: At least 32GB RAM.
|
||||
- Disk: 10GB for base installation plus 200GB for models.
|
||||
```
|
||||
Any Apple Silicon Mac with at least 8GB memory.
|
||||
```
|
||||
|
||||
!!! example "Stable Diffusion XL"
|
||||
|
||||
=== "Nvidia"
|
||||
|
||||
```
|
||||
Any GPU with at least 8GB VRAM.
|
||||
```
|
||||
|
||||
=== "AMD"
|
||||
|
||||
```
|
||||
Any GPU with at least 16GB VRAM. Linux only.
|
||||
```
|
||||
|
||||
=== "Mac"
|
||||
|
||||
```
|
||||
Any Apple Silicon Mac with at least 16GB memory.
|
||||
```
|
||||
|
||||
## RAM
|
||||
|
||||
At least 12GB of RAM.
|
||||
|
||||
## Disk
|
||||
|
||||
SSDs will, of course, offer the best performance.
|
||||
|
||||
The base application disk usage depends on the torch backend.
|
||||
|
||||
!!! example "Disk"
|
||||
|
||||
=== "Nvidia (CUDA)"
|
||||
|
||||
```
|
||||
~6.5GB
|
||||
```
|
||||
|
||||
=== "AMD (ROCm)"
|
||||
|
||||
```
|
||||
~12GB
|
||||
```
|
||||
|
||||
=== "Mac (MPS)"
|
||||
|
||||
```
|
||||
~3.5GB
|
||||
```
|
||||
|
||||
You'll need to set aside some space for images, depending on how much you generate. A couple GB is enough to get started.
|
||||
|
||||
You'll need a good chunk of space for models. Even if you only install the most popular models and the usual support models (ControlNet, IP Adapter, etc.), you will quickly hit 50GB of models.
|
||||
|
||||
!!! info "`tmpfs` on Linux"
|
||||
|
||||
@@ -37,32 +92,26 @@ The requirements below are rough guidelines for best performance. GPUs with less
|
||||
|
||||
## Python
|
||||
|
||||
!!! tip "The launcher installs python for you"
|
||||
Invoke requires python 3.10 or 3.11. If you don't already have one of these versions installed, we suggest installing 3.11, as it will be supported for longer.
|
||||
|
||||
You don't need to do this if you are installing with the [Invoke Launcher](./quick_start.md).
|
||||
Check that your system has an up-to-date Python installed by running `python --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).
|
||||
|
||||
Invoke requires python 3.10 through 3.12. If you don't already have one of these versions installed, we suggest installing 3.12, as it will be supported for longer.
|
||||
<h3>Installing Python (Windows)</h3>
|
||||
|
||||
Check that your system has an up-to-date Python installed by running `python3 --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).
|
||||
- Install python 3.11 with [an official installer].
|
||||
- The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
|
||||
- You may need to install [Microsoft Visual C++ Redistributable].
|
||||
|
||||
!!! info "Installing Python"
|
||||
<h3>Installing Python (macOS)</h3>
|
||||
|
||||
=== "Windows"
|
||||
- Install python 3.11 with [an official installer].
|
||||
- If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
- If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.
|
||||
|
||||
- Install python with [an official installer].
|
||||
- The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
|
||||
- You may need to install [Microsoft Visual C++ Redistributable].
|
||||
<h3>Installing Python (Linux)</h3>
|
||||
|
||||
=== "macOS"
|
||||
|
||||
- Install python with [an official installer].
|
||||
- If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
- If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.
|
||||
|
||||
=== "Linux"
|
||||
|
||||
- Installing python varies depending on your system. We recommend [using `uv` to manage your python installation](https://docs.astral.sh/uv/concepts/python-versions/#installing-a-python-version).
|
||||
- You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
||||
- Follow the [linux install instructions], being sure to install python 3.11.
|
||||
- You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
||||
|
||||
## Drivers
|
||||
|
||||
@@ -126,4 +175,7 @@ An alternative to installing ROCm locally is to use a [ROCm docker container] to
|
||||
[ROCm Documentation]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html
|
||||
[cuDNN support matrix]: https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html
|
||||
[Nvidia Container Runtime]: https://developer.nvidia.com/container-runtime
|
||||
[linux install instructions]: https://docs.python-guide.org/starting/install3/linux/
|
||||
[Microsoft Visual C++ Redistributable]: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||
[an official installer]: https://www.python.org/downloads/
|
||||
[CUDA Toolkit Downloads]: https://developer.nvidia.com/cuda-downloads
|
||||
|
||||
@@ -41,7 +41,7 @@ Nodes have a "Use Cache" option in their footer. This allows for performance imp
|
||||
|
||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
||||
|
||||
### Create Latent Noise
|
||||
### Noise
|
||||
|
||||
An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ If you'd prefer, you can also just download the whole node folder from the linke
|
||||
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
|
||||
|
||||
- Community Nodes
|
||||
+ [Anamorphic Tools](#anamorphic-tools)
|
||||
+ [Adapters-Linked](#adapters-linked-nodes)
|
||||
+ [Autostereogram](#autostereogram-nodes)
|
||||
+ [Average Images](#average-images)
|
||||
@@ -21,12 +20,8 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
+ [Close Color Mask](#close-color-mask)
|
||||
+ [Clothing Mask](#clothing-mask)
|
||||
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
|
||||
+ [Curves](#curves)
|
||||
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
|
||||
+ [Enhance Detail](#enhance-detail)
|
||||
+ [Film Grain](#film-grain)
|
||||
+ [Flip Pose](#flip-pose)
|
||||
+ [Flux Ideal Size](#flux-ideal-size)
|
||||
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
|
||||
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
||||
+ [Grid to Gif](#grid-to-gif)
|
||||
@@ -45,7 +40,6 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
+ [Metadata-Linked](#metadata-linked-nodes)
|
||||
+ [Negative Image](#negative-image)
|
||||
+ [Nightmare Promptgen](#nightmare-promptgen)
|
||||
+ [Ollama](#ollama-node)
|
||||
+ [One Button Prompt](#one-button-prompt)
|
||||
+ [Oobabooga](#oobabooga)
|
||||
+ [Prompt Tools](#prompt-tools)
|
||||
@@ -53,7 +47,6 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
+ [BriaAI Background Remove](#briaai-remove-background)
|
||||
+ [Remove Background](#remove-background)
|
||||
+ [Retroize](#retroize)
|
||||
+ [Stereogram](#stereogram-nodes)
|
||||
+ [Size Stepper Nodes](#size-stepper-nodes)
|
||||
+ [Simple Skin Detection](#simple-skin-detection)
|
||||
+ [Text font to Image](#text-font-to-image)
|
||||
@@ -65,13 +58,6 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
- [Help](#help)
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Anamorphic Tools
|
||||
|
||||
**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/anamorphic-tools
|
||||
|
||||
--------------------------------
|
||||
### Adapters Linked Nodes
|
||||
|
||||
@@ -94,7 +80,7 @@ Note: These are inherited from the core nodes so any update to the core nodes sh
|
||||
|
||||
**Example Usage:**
|
||||
</br>
|
||||
<img src="https://raw.githubusercontent.com/skunkworxdark/autostereogram_nodes/refs/heads/main/images/spider.png" width="200" /> -> <img src="https://raw.githubusercontent.com/skunkworxdark/autostereogram_nodes/refs/heads/main/images/spider-depth.png" width="200" /> -> <img src="https://raw.githubusercontent.com/skunkworxdark/autostereogram_nodes/refs/heads/main/images/spider-dots.png" width="200" /> <img src="https://raw.githubusercontent.com/skunkworxdark/autostereogram_nodes/refs/heads/main/images/spider-pattern.png" width="200" />
|
||||
<img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider-depth.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-dots.png" width="200" /> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-pattern.png" width="200" />
|
||||
|
||||
--------------------------------
|
||||
### Average Images
|
||||
@@ -143,13 +129,6 @@ Node Link: https://github.com/VeyDlin/clahe-node
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Curves
|
||||
|
||||
**Description:** Adjust an image's curve based on a user-defined string.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/curves-node
|
||||
|
||||
--------------------------------
|
||||
### Depth Map from Wavefront OBJ
|
||||
|
||||
@@ -162,17 +141,6 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
|
||||
**Example Usage:**
|
||||
</br><img src="https://raw.githubusercontent.com/dwringer/depth-from-obj-node/main/depth_from_obj_usage.jpg" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Enhance Detail
|
||||
|
||||
**Description:** A single node that can enhance the detail in an image. Increase or decrease details in an image using a guided filter (as opposed to the typical Gaussian blur used by most sharpening filters.) Based on the `Enhance Detail` ComfyUI node from https://github.com/spacepxl/ComfyUI-Image-Filters
|
||||
|
||||
**Node Link:** https://github.com/skunkworxdark/enhance-detail-node
|
||||
|
||||
**Example Usage:**
|
||||
</br>
|
||||
<img src="https://raw.githubusercontent.com/skunkworxdark/enhance-detail-node/refs/heads/main/images/Comparison.png" />
|
||||
|
||||
--------------------------------
|
||||
### Film Grain
|
||||
|
||||
@@ -180,20 +148,6 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/film-grain-node
|
||||
|
||||
--------------------------------
|
||||
### Flip Pose
|
||||
|
||||
**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flip-pose-node
|
||||
|
||||
--------------------------------
|
||||
### Flux Ideal Size
|
||||
|
||||
**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flux-ideal-size
|
||||
|
||||
--------------------------------
|
||||
### Generative Grammar-Based Prompt Nodes
|
||||
|
||||
@@ -353,7 +307,7 @@ View:
|
||||
**Node Link:** https://github.com/helix4u/load_video_frame
|
||||
|
||||
**Output Example:**
|
||||
<img src="https://raw.githubusercontent.com/helix4u/load_video_frame/refs/heads/main/_git_assets/dance1736978273.gif" width="500" />
|
||||
<img src="https://raw.githubusercontent.com/helix4u/load_video_frame/main/_git_assets/testmp4_embed_converted.gif" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Make 3D
|
||||
@@ -394,7 +348,7 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
|
||||
|
||||
**Output Examples**
|
||||
|
||||
<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" />
|
||||
<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Metadata Linked Nodes
|
||||
@@ -436,23 +390,10 @@ View:
|
||||
|
||||
**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen)
|
||||
|
||||
--------------------------------
|
||||
### Ollama Node
|
||||
|
||||
**Description:** Uses Ollama API to expand text prompts for text-to-image generation using local LLMs. Works great for expanding basic prompts into detailed natural language prompts for Flux. Also provides a toggle to unload the LLM model immediately after expanding, to free up VRAM for Invoke to continue the image generation workflow.
|
||||
|
||||
**Node Link:** https://github.com/Jonseed/Ollama-Node
|
||||
|
||||
**Example Node Graph:** https://github.com/Jonseed/Ollama-Node/blob/main/Ollama-Node-Flux-example.json
|
||||
|
||||
**View:**
|
||||
|
||||

|
||||
|
||||
--------------------------------
|
||||
### One Button Prompt
|
||||
|
||||
<img src="https://raw.githubusercontent.com/AIrjen/OneButtonPrompt_X_InvokeAI/refs/heads/main/images/background.png" width="800" />
|
||||
<img src="https://github.com/AIrjen/OneButtonPrompt_X_InvokeAI/blob/main/images/background.png" width="800" />
|
||||
|
||||
**Description:** an extensive suite of auto prompt generation and prompt helper nodes based on extensive logic. Get creative with the best prompt generator in the world.
|
||||
|
||||
@@ -462,7 +403,7 @@ The main node generates interesting prompts based on a set of parameters. There
|
||||
|
||||
**Nodes:**
|
||||
|
||||
<img src="https://raw.githubusercontent.com/AIrjen/OneButtonPrompt_X_InvokeAI/refs/heads/main/images/OBP_nodes_invokeai.png" width="800" />
|
||||
<img src="https://github.com/AIrjen/OneButtonPrompt_X_InvokeAI/blob/main/images/OBP_nodes_invokeai.png" width="800" />
|
||||
|
||||
--------------------------------
|
||||
### Oobabooga
|
||||
@@ -515,7 +456,7 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
|
||||
|
||||
**Workflow Examples**
|
||||
|
||||
<img src="https://raw.githubusercontent.com/skunkworxdark/prompt-tools/refs/heads/main/images/CSVToIndexStringNode.png"/>
|
||||
<img src="https://github.com/skunkworxdark/prompt-tools/blob/main/images/CSVToIndexStringNode.png" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Remote Image
|
||||
@@ -559,16 +500,6 @@ View:
|
||||
|
||||
<img src="https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Stereogram Nodes
|
||||
|
||||
**Description:** A set of custom nodes for InvokeAI to create cross-view or parallel-view stereograms. Stereograms are 2D images that, when viewed properly, reveal a 3D scene. Check out [r/crossview](https://www.reddit.com/r/CrossView/) for tutorials.
|
||||
|
||||
**Node Link:** https://github.com/simonfuhrmann/invokeai-stereo
|
||||
|
||||
**Example Workflow and Output**
|
||||
</br><img src="https://raw.githubusercontent.com/simonfuhrmann/invokeai-stereo/refs/heads/main/docs/example_promo_03.jpg" width="600" />
|
||||
|
||||
--------------------------------
|
||||
### Simple Skin Detection
|
||||
|
||||
@@ -663,7 +594,7 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ
|
||||
|
||||
**Output Examples**
|
||||
|
||||
<img src="https://raw.githubusercontent.com/skunkworxdark/XYGrid_nodes/refs/heads/main/images/collage.png" />
|
||||
<img src="https://github.com/skunkworxdark/XYGrid_nodes/blob/main/images/collage.png" width="300" />
|
||||
|
||||
|
||||
--------------------------------
|
||||
|
||||
@@ -99,6 +99,7 @@ their descriptions.
|
||||
| Scale Latents | Scales latents by a given factor. |
|
||||
| Segment Anything Processor | Applies segment anything processing to image |
|
||||
| Show Image | Displays a provided image, and passes it forward in the pipeline. |
|
||||
| Step Param Easing | Experimental per-step parameter easing for denoising steps |
|
||||
| String Primitive Collection | A collection of string primitive values |
|
||||
| String Primitive | A string primitive value |
|
||||
| Subtract Integers | Subtracts two numbers |
|
||||
|
||||
6
flake.lock
generated
@@ -2,11 +2,11 @@
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1727955264,
"narHash": "sha256-lrd+7mmb5NauRoMa8+J1jFKYVa+rc8aq2qc9+CxPDKc=",
"lastModified": 1690630721,
"narHash": "sha256-Y04onHyBQT4Erfr2fc82dbJTfXGYrf4V0ysLUYnPOP8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "71cd616696bd199ef18de62524f3df3ffe8b9333",
"rev": "d2b52322f35597c62abf56de91b0236746b2a03d",
"type": "github"
},
"original": {

@@ -34,7 +34,7 @@
cudaPackages.cudnn
cudaPackages.cuda_nvrtc
cudatoolkit
pkg-config
pkgconfig
libconfig
cmake
blas

@@ -66,7 +66,7 @@
black

# Frontend.
pnpm_8
yarn
nodejs
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
BIN installer/WinLongPathsEnabled.reg Normal file
Binary file not shown.
@@ -32,18 +32,18 @@ if [[ ! -z ${CI} ]]; then
|
||||
echo
|
||||
echo -e "${BCYAN}CI environment detected${RESET}"
|
||||
echo
|
||||
else
|
||||
echo
|
||||
echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
|
||||
echo "The current working directory is $(pwd)"
|
||||
read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
|
||||
echo
|
||||
fi
|
||||
|
||||
echo -e "${BGREEN}HEAD${RESET}:"
|
||||
git_show HEAD
|
||||
echo
|
||||
|
||||
# If the classifiers are invalid, publishing to PyPI will fail but the build will succeed.
|
||||
# It's a fast check, do it early.
|
||||
echo "Checking pyproject classifiers..."
|
||||
python3 ./check_classifiers.py ../pyproject.toml
|
||||
echo
|
||||
|
||||
# ---------------------- FRONTEND ----------------------
|
||||
|
||||
pushd ../invokeai/frontend/web >/dev/null
|
||||
@@ -77,8 +77,42 @@ fi
|
||||
|
||||
rm -rf ../build
|
||||
|
||||
python3 -m build --outdir ../dist/ ../.
|
||||
python3 -m build --outdir dist/ ../.
|
||||
|
||||
# ----------------------
|
||||
|
||||
echo
|
||||
echo "Building installer zip files for InvokeAI ${VERSION}..."
|
||||
echo
|
||||
|
||||
# get rid of any old ones
|
||||
rm -f *.zip
|
||||
rm -rf InvokeAI-Installer
|
||||
|
||||
# copy content
|
||||
mkdir InvokeAI-Installer
|
||||
for f in templates *.txt *.reg; do
|
||||
cp -r ${f} InvokeAI-Installer/
|
||||
done
|
||||
mkdir InvokeAI-Installer/lib
|
||||
cp lib/*.py InvokeAI-Installer/lib
|
||||
|
||||
# Install scripts
|
||||
# Mac/Linux
|
||||
cp install.sh.in InvokeAI-Installer/install.sh
|
||||
chmod a+x InvokeAI-Installer/install.sh
|
||||
|
||||
# Windows
|
||||
cp install.bat.in InvokeAI-Installer/install.bat
|
||||
cp WinLongPathsEnabled.reg InvokeAI-Installer/
|
||||
|
||||
FILENAME=InvokeAI-installer-$VERSION.zip
|
||||
|
||||
# Zip everything up
|
||||
zip -r ${FILENAME} InvokeAI-Installer
|
||||
|
||||
echo
|
||||
echo -e "${BGREEN}Built installer: ./${FILENAME}${RESET}"
|
||||
echo -e "${BGREEN}Built PyPi distribution: ./dist${RESET}"
|
||||
|
||||
# clean up, but only if we are not in a github action
|
||||
@@ -91,7 +125,9 @@ fi
|
||||
if [[ ! -z ${CI} ]]; then
|
||||
echo
|
||||
echo "Setting GitHub action outputs..."
|
||||
echo "DIST_PATH=./dist/" >>$GITHUB_OUTPUT
|
||||
echo "INSTALLER_FILENAME=${FILENAME}" >>$GITHUB_OUTPUT
|
||||
echo "INSTALLER_PATH=installer/${FILENAME}" >>$GITHUB_OUTPUT
|
||||
echo "DIST_PATH=installer/dist/" >>$GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
exit 0
|
||||
128 installer/install.bat.in Normal file
@@ -0,0 +1,128 @@
|
||||
@echo off
|
||||
setlocal EnableExtensions EnableDelayedExpansion
|
||||
|
||||
@rem This script requires the user to install Python 3.10 or higher. All other
|
||||
@rem requirements are downloaded as needed.
|
||||
|
||||
@rem change to the script's directory
|
||||
PUSHD "%~dp0"
|
||||
|
||||
set "no_cache_dir=--no-cache-dir"
|
||||
if "%1" == "use-cache" (
|
||||
set "no_cache_dir="
|
||||
)
|
||||
|
||||
@rem Config
|
||||
@rem The version in the next line is replaced by an up to date release number
|
||||
@rem when create_installer.sh is run. Change the release number there.
|
||||
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/help/FAQ/
|
||||
set PYTHON_URL=https://www.python.org/downloads/windows/
|
||||
set MINIMUM_PYTHON_VERSION=3.10.0
|
||||
set PYTHON_URL=https://www.python.org/downloads/release/python-3109/
|
||||
|
||||
set err_msg=An error has occurred and the script could not continue.
|
||||
|
||||
@rem --------------------------- Intro -------------------------------
|
||||
echo This script will install InvokeAI and its dependencies.
|
||||
echo.
|
||||
echo BEFORE YOU START PLEASE MAKE SURE TO DO THE FOLLOWING
|
||||
echo 1. Install python 3.10 or 3.11. Python version 3.9 is no longer supported.
|
||||
echo 2. Double-click on the file WinLongPathsEnabled.reg in order to
|
||||
echo enable long path support on your system.
|
||||
echo 3. Install the Visual C++ core libraries.
|
||||
echo Please download and install the libraries from:
|
||||
echo https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||
echo.
|
||||
echo See %INSTRUCTIONS% for more details.
|
||||
echo.
|
||||
echo FOR THE BEST USER EXPERIENCE WE SUGGEST MAXIMIZING THIS WINDOW NOW.
|
||||
pause
|
||||
|
||||
@rem ---------------------------- check Python version ---------------
|
||||
echo ***** Checking and Updating Python *****
|
||||
|
||||
call python --version >.tmp1 2>.tmp2
|
||||
if %errorlevel% == 1 (
|
||||
set err_msg=Please install Python 3.10-11. See %INSTRUCTIONS% for details.
|
||||
goto err_exit
|
||||
)
|
||||
|
||||
for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i
|
||||
if "%python_version%" == "" (
|
||||
set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.12 from %PYTHON_URL%
|
||||
goto err_exit
|
||||
)
|
||||
|
||||
call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version%
|
||||
if %errorlevel% == 1 (
|
||||
set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.12 from %PYTHON_URL%
|
||||
goto err_exit
|
||||
)
|
||||
|
||||
@rem Cleanup
|
||||
del /q .tmp1 .tmp2
|
||||
|
||||
@rem -------------- Install and Configure ---------------
|
||||
|
||||
call python .\lib\main.py
|
||||
pause
|
||||
exit /b
|
||||
|
||||
@rem ------------------------ Subroutines ---------------
|
||||
@rem routine to do comparison of semantic version numbers
|
||||
@rem found at https://stackoverflow.com/questions/15807762/compare-version-numbers-in-batch-file
|
||||
:compareVersions
|
||||
::
|
||||
:: Compares two version numbers and returns the result in the ERRORLEVEL
|
||||
::
|
||||
:: Returns 1 if version1 > version2
|
||||
:: 0 if version1 = version2
|
||||
:: -1 if version1 < version2
|
||||
::
|
||||
:: The nodes must be delimited by . or , or -
|
||||
::
|
||||
:: Nodes are normally strictly numeric, without a 0 prefix. A letter suffix
|
||||
:: is treated as a separate node
|
||||
::
|
||||
setlocal enableDelayedExpansion
|
||||
set "v1=%~1"
|
||||
set "v2=%~2"
|
||||
call :divideLetters v1
|
||||
call :divideLetters v2
|
||||
:loop
|
||||
call :parseNode "%v1%" n1 v1
|
||||
call :parseNode "%v2%" n2 v2
|
||||
if %n1% gtr %n2% exit /b 1
|
||||
if %n1% lss %n2% exit /b -1
|
||||
if not defined v1 if not defined v2 exit /b 0
|
||||
if not defined v1 exit /b -1
|
||||
if not defined v2 exit /b 1
|
||||
goto :loop
|
||||
|
||||
|
||||
:parseNode version nodeVar remainderVar
|
||||
for /f "tokens=1* delims=.,-" %%A in ("%~1") do (
|
||||
set "%~2=%%A"
|
||||
set "%~3=%%B"
|
||||
)
|
||||
exit /b
|
||||
|
||||
|
||||
:divideLetters versionVar
|
||||
for %%C in (a b c d e f g h i j k l m n o p q r s t u v w x y z) do set "%~1=!%~1:%%C=.%%C!"
|
||||
exit /b
|
||||
|
||||
:err_exit
|
||||
echo %err_msg%
|
||||
echo The installer will exit now.
|
||||
pause
|
||||
exit /b
|
||||
|
||||
pause
|
||||
|
||||
:Trim
|
||||
SetLocal EnableDelayedExpansion
|
||||
set Params=%*
|
||||
for /f "tokens=1*" %%a in ("!Params!") do EndLocal & set %1=%%b
|
||||
exit /b
|
||||
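The `:compareVersions` subroutine above performs a plain semantic-version comparison in batch. As a quick sanity check of the logic, here is a minimal Python sketch of the same comparison (illustrative only, not part of the installer; it skips the letter-suffix handling the batch routine documents):

```python
# Illustrative sketch: numeric-only semantic version comparison,
# returning 1 / 0 / -1 like the :compareVersions batch subroutine.
def compare_versions(v1: str, v2: str) -> int:
    a = [int(x) for x in v1.split(".")]
    b = [int(x) for x in v2.split(".")]
    length = max(len(a), len(b))
    a += [0] * (length - len(a))  # pad so "3.10" compares equal to "3.10.0"
    b += [0] * (length - len(b))
    return (a > b) - (a < b)

assert compare_versions("3.10.12", "3.10.0") == 1   # installed version is new enough
assert compare_versions("3.9.7", "3.10.0") == -1    # too old
```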
39 installer/install.sh.in Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
|
||||
# make sure we are not already in a venv
|
||||
# (don't need to check status)
|
||||
deactivate >/dev/null 2>&1
|
||||
scriptdir=$(dirname "$0")
|
||||
cd $scriptdir
|
||||
|
||||
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
|
||||
|
||||
MINIMUM_PYTHON_VERSION=3.10.0
|
||||
MAXIMUM_PYTHON_VERSION=3.11.100
|
||||
PYTHON=""
|
||||
for candidate in python3.11 python3.10 python3 python ; do
|
||||
if ppath=`which $candidate`; then
|
||||
# when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
|
||||
# we check that this found executable can actually run
|
||||
if [ $($candidate --version &>/dev/null; echo ${PIPESTATUS}) -gt 0 ]; then continue; fi
|
||||
|
||||
python_version=$($ppath -V | awk '{ print $2 }')
|
||||
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
|
||||
if [ $(version $python_version) -le $(version "$MAXIMUM_PYTHON_VERSION") ]; then
|
||||
PYTHON=$ppath
|
||||
break
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$PYTHON" ]; then
|
||||
echo "A suitable Python interpreter could not be found"
|
||||
echo "Please install Python $MINIMUM_PYTHON_VERSION or higher (maximum $MAXIMUM_PYTHON_VERSION) before running this script. See instructions at $INSTRUCTIONS for help."
|
||||
echo "For the best user experience we suggest enlarging or maximizing this window now."
|
||||
read -p "Press any key to exit"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
exec $PYTHON ./lib/main.py ${@}
|
||||
read -p "Press any key to exit"
|
||||
435 installer/lib/installer.py Normal file
@@ -0,0 +1,435 @@
|
||||
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
||||
"""
|
||||
InvokeAI installer script
|
||||
"""
|
||||
|
||||
import locale
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import venv
|
||||
from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import Optional, Tuple
|
||||
|
||||
SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
|
||||
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
|
||||
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"
|
||||
DOCS_URL = "https://invoke-ai.github.io/InvokeAI/"
|
||||
DISCORD_URL = "https://discord.gg/ZmtBAhwWhy"
|
||||
|
||||
OS = platform.uname().system
|
||||
ARCH = platform.uname().machine
|
||||
VERSION = "latest"
|
||||
|
||||
|
||||
def get_version_from_wheel_filename(wheel_filename: str) -> str:
|
||||
match = re.search(r"-(\d+\.\d+\.\d+)", wheel_filename)
|
||||
if match:
|
||||
version = match.group(1)
|
||||
return version
|
||||
else:
|
||||
raise ValueError(f"Could not extract version from wheel filename: {wheel_filename}")
|
||||
|
||||
|
||||
class Installer:
|
||||
"""
|
||||
Deploys an InvokeAI installation into a given path
|
||||
"""
|
||||
|
||||
reqs: list[str] = INSTALLER_REQS
|
||||
|
||||
def __init__(self) -> None:
|
||||
if os.getenv("VIRTUAL_ENV") is not None:
|
||||
print("A virtual environment is already activated. Please 'deactivate' before installation.")
|
||||
sys.exit(-1)
|
||||
self.bootstrap()
|
||||
self.available_releases = get_github_releases()
|
||||
|
||||
def mktemp_venv(self) -> TemporaryDirectory[str]:
|
||||
"""
|
||||
Creates a temporary virtual environment for the installer itself
|
||||
|
||||
:return: path to the created virtual environment directory
|
||||
:rtype: TemporaryDirectory
|
||||
"""
|
||||
|
||||
# Cleaning up temporary directories on Windows results in a race condition
|
||||
# and a stack trace.
|
||||
# `ignore_cleanup_errors` was only added in Python 3.10
|
||||
if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
|
||||
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
|
||||
else:
|
||||
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX)
|
||||
|
||||
venv.create(venv_dir.name, with_pip=True)
|
||||
self.venv_dir = venv_dir
|
||||
set_sys_path(Path(venv_dir.name))
|
||||
|
||||
return venv_dir
|
||||
|
||||
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory[str] | None:
|
||||
"""
|
||||
Bootstrap the installer venv with packages required at install time
|
||||
"""
|
||||
|
||||
print("Initializing the installer. This may take a minute - please wait...")
|
||||
|
||||
venv_dir = self.mktemp_venv()
|
||||
pip = get_pip_from_venv(Path(venv_dir.name))
|
||||
|
||||
cmd = [pip, "install", "--require-virtualenv", "--use-pep517"]
|
||||
cmd.extend(self.reqs)
|
||||
|
||||
try:
|
||||
# upgrade pip to the latest version to avoid a confusing message
|
||||
res = upgrade_pip(Path(venv_dir.name))
|
||||
if verbose:
|
||||
print(res)
|
||||
|
||||
# run the install prerequisites installation
|
||||
res = subprocess.check_output(cmd).decode()
|
||||
|
||||
if verbose:
|
||||
print(res)
|
||||
|
||||
return venv_dir
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(e)
|
||||
|
||||
def app_venv(self, venv_parent: Path) -> Path:
|
||||
"""
|
||||
Create a virtualenv for the InvokeAI installation
|
||||
"""
|
||||
|
||||
venv_dir = venv_parent / ".venv"
|
||||
|
||||
# Prefer to copy python executables
|
||||
# so that updates to system python don't break InvokeAI
|
||||
try:
|
||||
venv.create(venv_dir, with_pip=True)
|
||||
# If installing over an existing environment previously created with symlinks,
|
||||
# the executables will fail to copy. Keep symlinks in that case
|
||||
except shutil.SameFileError:
|
||||
venv.create(venv_dir, with_pip=True, symlinks=True)
|
||||
|
||||
return venv_dir
|
||||
|
||||
def install(
|
||||
self,
|
||||
root: str = "~/invokeai",
|
||||
yes_to_all: bool = False,
|
||||
find_links: Optional[str] = None,
|
||||
wheel: Optional[Path] = None,
|
||||
) -> None:
|
||||
"""Install the InvokeAI application into the given runtime path
|
||||
|
||||
Args:
|
||||
root: Destination path for the installation
|
||||
yes_to_all: Accept defaults to all questions
|
||||
find_links: A local directory to search for requirement wheels before going to remote indexes
|
||||
wheel: A wheel file to install
|
||||
"""
|
||||
|
||||
import messages
|
||||
|
||||
if wheel:
|
||||
messages.installing_from_wheel(wheel.name)
|
||||
version = get_version_from_wheel_filename(wheel.name)
|
||||
else:
|
||||
messages.welcome(self.available_releases)
|
||||
version = messages.choose_version(self.available_releases)
|
||||
|
||||
auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
|
||||
destination = auto_dest if yes_to_all else messages.dest_path(root)
|
||||
if destination is None:
|
||||
print("Could not find or create the destination directory. Installation cancelled.")
|
||||
sys.exit(0)
|
||||
|
||||
# create the venv for the app
|
||||
self.venv = self.app_venv(venv_parent=destination)
|
||||
|
||||
self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)
|
||||
|
||||
# install dependencies and the InvokeAI application
|
||||
(extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
|
||||
self.instance.install(extra_index_url, optional_modules, find_links, wheel)
|
||||
|
||||
# install the launch/update scripts into the runtime directory
|
||||
self.instance.install_user_scripts()
|
||||
|
||||
message = f"""
|
||||
*** Installation Successful ***
|
||||
|
||||
To start the application, run:
|
||||
{destination}/invoke.{"bat" if sys.platform == "win32" else "sh"}
|
||||
|
||||
For more information, troubleshooting and support, visit our docs at:
|
||||
{DOCS_URL}
|
||||
|
||||
Join the community on Discord:
|
||||
{DISCORD_URL}
|
||||
"""
|
||||
print(message)
|
||||
|
||||
|
||||
class InvokeAiInstance:
|
||||
"""
|
||||
Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
|
||||
The virtual environment *may* reside within the runtime directory.
|
||||
A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
|
||||
"""
|
||||
|
||||
def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
|
||||
self.runtime = runtime
|
||||
self.venv = venv
|
||||
self.pip = get_pip_from_venv(venv)
|
||||
self.version = version
|
||||
|
||||
set_sys_path(venv)
|
||||
os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
|
||||
os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
|
||||
upgrade_pip(venv)
|
||||
|
||||
def get(self) -> tuple[Path, Path]:
|
||||
"""
|
||||
Get the location of the virtualenv directory for this installation
|
||||
|
||||
:return: Paths of the runtime and the venv directory
|
||||
:rtype: tuple[Path, Path]
|
||||
"""
|
||||
|
||||
return (self.runtime, self.venv)
|
||||
|
||||
def install(
|
||||
self,
|
||||
extra_index_url: Optional[str] = None,
|
||||
optional_modules: Optional[str] = None,
|
||||
find_links: Optional[str] = None,
|
||||
wheel: Optional[Path] = None,
|
||||
):
|
||||
"""Install the package from PyPi or a wheel, if provided.
|
||||
|
||||
Args:
|
||||
extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
|
||||
optional_modules: optional modules to install using "[module1,module2]" format.
|
||||
find_links: path to a directory containing wheels to be searched prior to going to the internet
|
||||
wheel: a wheel file to install
|
||||
"""
|
||||
|
||||
import messages
|
||||
|
||||
# not currently used, but may be useful for "install most recent version" option
|
||||
if self.version == "prerelease":
|
||||
version = None
|
||||
pre_flag = "--pre"
|
||||
elif self.version == "stable":
|
||||
version = None
|
||||
pre_flag = None
|
||||
else:
|
||||
version = self.version
|
||||
pre_flag = None
|
||||
|
||||
src = "invokeai"
|
||||
if optional_modules:
|
||||
src += optional_modules
|
||||
if version:
|
||||
src += f"=={version}"
|
||||
|
||||
messages.simple_banner("Installing the InvokeAI Application :art:")
|
||||
|
||||
from plumbum import FG, ProcessExecutionError, local
|
||||
|
||||
pip = local[self.pip]
|
||||
|
||||
pipeline = pip[
|
||||
"install",
|
||||
"--require-virtualenv",
|
||||
"--force-reinstall",
|
||||
"--use-pep517",
|
||||
str(src) if not wheel else str(wheel),
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
"--extra-index-url" if extra_index_url is not None else None,
|
||||
extra_index_url,
|
||||
pre_flag if not wheel else None, # Ignore the flag if we are installing a wheel
|
||||
]
|
||||
|
||||
try:
|
||||
_ = pipeline & FG
|
||||
except ProcessExecutionError as e:
|
||||
print(f"Error: {e}")
|
||||
print(
|
||||
"Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
def install_user_scripts(self):
|
||||
"""
|
||||
Copy the launch and update scripts to the runtime dir
|
||||
"""
|
||||
|
||||
ext = "bat" if OS == "Windows" else "sh"
|
||||
|
||||
scripts = ["invoke"]
|
||||
|
||||
for script in scripts:
|
||||
src = Path(__file__).parent / ".." / "templates" / f"{script}.{ext}.in"
|
||||
dest = self.runtime / f"{script}.{ext}"
|
||||
shutil.copy(src, dest)
|
||||
os.chmod(dest, 0o0755)
|
||||
|
||||
def update(self):
|
||||
pass
|
||||
|
||||
def remove(self):
|
||||
pass
|
||||
|
||||
|
||||
### Utility functions ###
|
||||
|
||||
|
||||
def get_pip_from_venv(venv_path: Path) -> str:
|
||||
"""
|
||||
Given a path to a virtual environment, get the absolute path to the `pip` executable
|
||||
in a cross-platform fashion. Does not validate that the pip executable
|
||||
actually exists in the virtualenv.
|
||||
|
||||
:param venv_path: Path to the virtual environment
|
||||
:type venv_path: Path
|
||||
:return: Absolute path to the pip executable
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
pip = "Scripts\\pip.exe" if OS == "Windows" else "bin/pip"
|
||||
return str(venv_path.expanduser().resolve() / pip)
|
||||
|
||||
|
||||
def upgrade_pip(venv_path: Path) -> str | None:
|
||||
"""
|
||||
Upgrade the pip executable in the given virtual environment
|
||||
"""
|
||||
|
||||
python = "Scripts\\python.exe" if OS == "Windows" else "bin/python"
|
||||
python = str(venv_path.expanduser().resolve() / python)
|
||||
|
||||
try:
|
||||
result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode(
|
||||
encoding=locale.getpreferredencoding()
|
||||
)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(e)
|
||||
result = None
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def set_sys_path(venv_path: Path) -> None:
|
||||
"""
|
||||
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
|
||||
such that packages from the given venv may be imported in the current process.
|
||||
Ensure that the packages from system environment are not visible (emulate
|
||||
the virtual env 'activate' script) - this doesn't work on Windows yet.
|
||||
|
||||
:param venv_path: Path to the virtual environment
|
||||
:type venv_path: Path
|
||||
"""
|
||||
|
||||
# filter out any paths in sys.path that may be system- or user-wide
|
||||
# but leave the temporary bootstrap virtualenv as it contains packages we
|
||||
# temporarily need at install time
|
||||
sys.path = list(filter(lambda p: not p.endswith("-packages") or p.find(BOOTSTRAP_VENV_PREFIX) != -1, sys.path))
|
||||
|
||||
# determine site-packages/lib directory location for the venv
|
||||
lib = "Lib" if OS == "Windows" else f"lib/python{sys.version_info.major}.{sys.version_info.minor}"
|
||||
|
||||
# add the site-packages location to the venv
|
||||
sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
|
||||
|
||||
|
||||
def get_github_releases() -> tuple[list[str], list[str]] | None:
|
||||
"""
|
||||
Query Github for published (pre-)release versions.
|
||||
Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
|
||||
Return None if the query fails for any reason.
|
||||
"""
|
||||
|
||||
import requests
|
||||
|
||||
## get latest releases using github api
|
||||
url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
|
||||
releases: list[str] = []
|
||||
pre_releases: list[str] = []
|
||||
try:
|
||||
res = requests.get(url)
|
||||
res.raise_for_status()
|
||||
tag_info = res.json()
|
||||
for tag in tag_info:
|
||||
if not tag["prerelease"]:
|
||||
releases.append(tag["tag_name"].lstrip("v"))
|
||||
else:
|
||||
pre_releases.append(tag["tag_name"].lstrip("v"))
|
||||
except requests.HTTPError as e:
|
||||
print(f"Error: {e}")
|
||||
print("Could not fetch version information from GitHub. Please check your network connection and try again.")
|
||||
return
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
|
||||
return
|
||||
|
||||
releases.sort(reverse=True)
|
||||
pre_releases.sort(reverse=True)
|
||||
|
||||
return releases, pre_releases
|
||||
|
||||
|
||||
def get_torch_source() -> Tuple[str | None, str | None]:
|
||||
"""
|
||||
Determine the extra index URL for pip to use for torch installation.
|
||||
This depends on the OS and the graphics accelerator in use.
|
||||
This is only applicable to Windows and Linux, since PyTorch does not
|
||||
offer accelerated builds for macOS.
|
||||
|
||||
Prefer CUDA-enabled wheels if the user wasn't sure of their GPU, as it will fallback to CPU if possible.
|
||||
|
||||
A NoneType return means just go to PyPi.
|
||||
|
||||
:return: tuple consisting of (extra index url or None, optional modules to load or None)
|
||||
:rtype: list
|
||||
"""
|
||||
|
||||
from messages import select_gpu
|
||||
|
||||
# device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
|
||||
device = select_gpu()
|
||||
|
||||
# The correct extra index URLs for torch are inconsistent, see https://pytorch.org/get-started/locally/#start-locally
|
||||
|
||||
url = None
|
||||
optional_modules: str | None = None
|
||||
if OS == "Linux":
|
||||
if device.value == "rocm":
|
||||
url = "https://download.pytorch.org/whl/rocm5.6"
|
||||
elif device.value == "cpu":
|
||||
url = "https://download.pytorch.org/whl/cpu"
|
||||
elif device.value == "cuda":
|
||||
# CUDA uses the default PyPi index
|
||||
optional_modules = "[xformers,onnx-cuda]"
|
||||
elif OS == "Windows":
|
||||
if device.value == "cuda":
|
||||
url = "https://download.pytorch.org/whl/cu121"
|
||||
optional_modules = "[xformers,onnx-cuda]"
|
||||
elif device.value == "cpu":
|
||||
# CPU uses the default PyPi index, no optional modules
|
||||
pass
|
||||
elif OS == "Darwin":
|
||||
# macOS uses the default PyPi index, no optional modules
|
||||
pass
|
||||
|
||||
# Fall back to defaults
|
||||
|
||||
return (url, optional_modules)
|
||||
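To make the role of `get_torch_source()` concrete, the sketch below shows how its `(extra_index_url, optional_modules)` result feeds into the pip invocation assembled in `InvokeAiInstance.install()`. This is a hand-written illustration assuming a Linux + ROCm selection, not code from the installer:

```python
# Hypothetical illustration of how get_torch_source()'s result is consumed.
# Values below assume the user selected an AMD GPU on Linux.
extra_index_url = "https://download.pytorch.org/whl/rocm5.6"
optional_modules = None  # ROCm installs no optional modules

src = "invokeai" + (optional_modules or "")
cmd = ["pip", "install", "--require-virtualenv", "--use-pep517", src]
if extra_index_url is not None:
    cmd += ["--extra-index-url", extra_index_url]

print(" ".join(cmd))
# pip install --require-virtualenv --use-pep517 invokeai --extra-index-url https://download.pytorch.org/whl/rocm5.6
```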
57 installer/lib/main.py Normal file
@@ -0,0 +1,57 @@
|
||||
"""
|
||||
InvokeAI Installer
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from installer import Installer
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"-r",
|
||||
"--root",
|
||||
dest="root",
|
||||
type=str,
|
||||
help="Destination path for installation",
|
||||
default=os.environ.get("INVOKEAI_ROOT") or "~/invokeai",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-y",
|
||||
"--yes",
|
||||
"--yes-to-all",
|
||||
dest="yes_to_all",
|
||||
action="store_true",
|
||||
help="Assume default answers to all questions",
|
||||
default=False,
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--find-links",
|
||||
dest="find_links",
|
||||
help="Specifies a directory of local wheel files to be searched prior to searching the online repositories.",
|
||||
type=Path,
|
||||
default=None,
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--wheel",
|
||||
dest="wheel",
|
||||
help="Specifies a wheel for the InvokeAI package. Used for troubleshooting or testing prereleases.",
|
||||
type=Path,
|
||||
default=None,
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
inst = Installer()
|
||||
|
||||
try:
|
||||
inst.install(**args.__dict__)
|
||||
except KeyboardInterrupt:
|
||||
print("\n")
|
||||
print("Ctrl-C pressed. Aborting.")
|
||||
print("Come back soon!")
|
||||
338 installer/lib/messages.py Normal file
@@ -0,0 +1,338 @@
|
||||
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
||||
"""
|
||||
Installer user interaction
|
||||
"""
|
||||
|
||||
import os
|
||||
import platform
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from prompt_toolkit import prompt
|
||||
from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
|
||||
from prompt_toolkit.validation import Validator
|
||||
from rich import box, print
|
||||
from rich.console import Console, Group, group
|
||||
from rich.panel import Panel
|
||||
from rich.prompt import Confirm
|
||||
from rich.style import Style
|
||||
from rich.syntax import Syntax
|
||||
from rich.text import Text
|
||||
|
||||
OS = platform.uname().system
|
||||
ARCH = platform.uname().machine
|
||||
|
||||
if OS == "Windows":
|
||||
# Windows terminals look better without a background colour
|
||||
console = Console(style=Style(color="grey74"))
|
||||
else:
|
||||
console = Console(style=Style(color="grey74", bgcolor="grey19"))
|
||||
|
||||
|
||||
def welcome(available_releases: tuple[list[str], list[str]] | None = None) -> None:
|
||||
@group()
|
||||
def text():
|
||||
if (platform_specific := _platform_specific_help()) is not None:
|
||||
yield platform_specific
|
||||
yield ""
|
||||
yield Text.from_markup(
|
||||
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
|
||||
justify="center",
|
||||
)
|
||||
if available_releases is not None:
|
||||
latest_stable = available_releases[0][0]
|
||||
last_pre = available_releases[1][0]
|
||||
yield ""
|
||||
yield Text.from_markup(
|
||||
f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
|
||||
)
|
||||
yield Text.from_markup(
|
||||
f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
|
||||
)
|
||||
|
||||
console.rule()
|
||||
print(
|
||||
Panel(
|
||||
title="[bold wheat1]Welcome to the InvokeAI Installer",
|
||||
renderable=text(),
|
||||
box=box.DOUBLE,
|
||||
expand=True,
|
||||
padding=(1, 2),
|
||||
style=Style(bgcolor="grey23", color="orange1"),
|
||||
subtitle=f"[bold grey39]{OS}-{ARCH}",
|
||||
)
|
||||
)
|
||||
console.line()
|
||||
|
||||
|
||||
def installing_from_wheel(wheel_filename: str) -> None:
|
||||
"""Display a message about installing from a wheel"""
|
||||
|
||||
@group()
|
||||
def text():
|
||||
yield Text.from_markup(f"You are installing from a wheel file: [bold]{wheel_filename}\n")
|
||||
yield Text.from_markup(
|
||||
"[bold orange3]If you are not sure why you are doing this, you should cancel and install InvokeAI normally."
|
||||
)
|
||||
|
||||
console.print(
|
||||
Panel(
|
||||
title="Installing from Wheel",
|
||||
renderable=text(),
|
||||
box=box.DOUBLE,
|
||||
expand=True,
|
||||
padding=(1, 2),
|
||||
)
|
||||
)
|
||||
|
||||
should_proceed = Confirm.ask("Do you want to proceed?")
|
||||
|
||||
if not should_proceed:
|
||||
console.print("Installation cancelled.")
|
||||
exit()
|
||||
|
||||
|
||||
def choose_version(available_releases: tuple[list[str], list[str]] | None = None) -> str:
|
||||
"""
|
||||
Prompt the user to choose an Invoke version to install
|
||||
"""
|
||||
|
||||
# short circuit if we couldn't get a version list
|
||||
# still try to install the latest stable version
|
||||
if available_releases is None:
|
||||
return "stable"
|
||||
|
||||
console.print(":grey_question: [orange3]Please choose an Invoke version to install.")
|
||||
|
||||
choices = available_releases[0] + available_releases[1]
|
||||
|
||||
response = prompt(
|
||||
message=f" <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
|
||||
complete_while_typing=True,
|
||||
completer=FuzzyWordCompleter(choices),
|
||||
)
|
||||
console.print(f" Version {choices[0] if response == '' else response} will be installed.")
|
||||
|
||||
console.line()
|
||||
|
||||
return "stable" if response == "" else response
|
||||
|
||||
|
||||
def confirm_install(dest: Path) -> bool:
|
||||
if dest.exists():
|
||||
print(f":stop_sign: Directory {dest} already exists!")
|
||||
print(" Is this location correct?")
|
||||
default = False
|
||||
else:
|
||||
print(f":file_folder: InvokeAI will be installed in {dest}")
|
||||
default = True
|
||||
|
||||
dest_confirmed = Confirm.ask(" Please confirm:", default=default)
|
||||
|
||||
console.line()
|
||||
|
||||
return dest_confirmed
|
||||
|
||||
|
||||
def dest_path(dest: Optional[str | Path] = None) -> Path | None:
|
||||
"""
|
||||
Prompt the user for the destination path and create the path
|
||||
|
||||
:param dest: a filesystem path, defaults to None
|
||||
:type dest: str, optional
|
||||
:return: absolute path to the created installation directory
|
||||
:rtype: Path
|
||||
"""
|
||||
|
||||
if dest is not None:
|
||||
dest = Path(dest).expanduser().resolve()
|
||||
else:
|
||||
dest = Path.cwd().expanduser().resolve()
|
||||
prev_dest = init_path = dest
|
||||
dest_confirmed = False
|
||||
|
||||
while not dest_confirmed:
|
||||
browse_start = (dest or Path.cwd()).expanduser().resolve()
|
||||
|
||||
path_completer = PathCompleter(
|
||||
only_directories=True,
|
||||
expanduser=True,
|
||||
get_paths=lambda: [str(browse_start)], # noqa: B023
|
||||
# get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
|
||||
)
|
||||
|
||||
console.line()
|
||||
|
||||
console.print(f":grey_question: [orange3]Please select the install destination:[/] \\[{browse_start}]: ")
|
||||
selected = prompt(
|
||||
">>> ",
|
||||
complete_in_thread=True,
|
||||
completer=path_completer,
|
||||
default=str(browse_start) + os.sep,
|
||||
vi_mode=True,
|
||||
complete_while_typing=True,
|
||||
# Test that this is not needed on Windows
|
||||
# complete_style=CompleteStyle.READLINE_LIKE,
|
||||
)
|
||||
prev_dest = dest
|
||||
dest = Path(selected)
|
||||
|
||||
console.line()
|
||||
|
||||
dest_confirmed = confirm_install(dest.expanduser().resolve())
|
||||
|
||||
if not dest_confirmed:
|
||||
dest = prev_dest
|
||||
|
||||
dest = dest.expanduser().resolve()
|
||||
|
||||
try:
|
||||
dest.mkdir(exist_ok=True, parents=True)
|
||||
return dest
|
||||
except PermissionError:
|
||||
console.print(
|
||||
f"Failed to create directory {dest} due to insufficient permissions",
|
||||
style=Style(color="red"),
|
||||
highlight=True,
|
||||
)
|
||||
except OSError:
|
||||
console.print_exception()
|
||||
|
||||
if Confirm.ask("Would you like to try again?"):
|
||||
dest_path(init_path)
|
||||
else:
|
||||
console.rule("Goodbye!")
|
||||
|
||||
|
||||
class GpuType(Enum):
|
||||
CUDA = "cuda"
|
||||
ROCM = "rocm"
|
||||
CPU = "cpu"
|
||||
|
||||
|
||||
def select_gpu() -> GpuType:
|
||||
"""
|
||||
Prompt the user to select the GPU driver
|
||||
"""
|
||||
|
||||
if ARCH == "arm64" and OS != "Darwin":
|
||||
print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
|
||||
return GpuType.CPU
|
||||
|
||||
nvidia = (
|
||||
"an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
|
||||
GpuType.CUDA,
|
||||
)
|
||||
amd = (
|
||||
"an [gold1 b]AMD[/] GPU (using ROCm™)",
|
||||
GpuType.ROCM,
|
||||
)
|
||||
cpu = (
|
||||
"Do not install any GPU support, use CPU for generation (slow)",
|
||||
GpuType.CPU,
|
||||
)
|
||||
|
||||
options = []
|
||||
if OS == "Windows":
|
||||
options = [nvidia, cpu]
|
||||
if OS == "Linux":
|
||||
options = [nvidia, amd, cpu]
|
||||
elif OS == "Darwin":
|
||||
options = [cpu]
|
||||
|
||||
if len(options) == 1:
|
||||
print(f'Your platform [gold1]{OS}-{ARCH}[/] only supports the "{options[0][1]}" driver. Proceeding with that.')
|
||||
return options[0][1]
|
||||
|
||||
options = {str(i): opt for i, opt in enumerate(options, 1)}
|
||||
|
||||
console.rule(":space_invader: GPU (Graphics Card) selection :space_invader:")
|
||||
console.print(
|
||||
Panel(
|
||||
Group(
|
||||
"\n".join(
|
||||
[
|
||||
f"Detected the [gold1]{OS}-{ARCH}[/] platform",
|
||||
"",
|
||||
"See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
|
||||
"",
|
||||
"[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
|
||||
]
|
||||
),
|
||||
"",
|
||||
"Please select the type of GPU installed in your computer.",
|
||||
Panel(
|
||||
"\n".join([f"[dark_goldenrod b i]{i}[/] [dark_red]🢒[/]{opt[0]}" for (i, opt) in options.items()]),
|
||||
box=box.MINIMAL,
|
||||
),
|
||||
),
|
||||
box=box.MINIMAL,
|
||||
padding=(1, 1),
|
||||
)
|
||||
)
|
||||
choice = prompt(
|
||||
"Please make your selection: ",
|
||||
validator=Validator.from_callable(
|
||||
lambda n: n in options.keys(), error_message="Please select one the above options"
|
||||
),
|
||||
)
|
||||
|
||||
return options[choice][1]
|
||||
|
||||
|
||||
def simple_banner(message: str) -> None:
|
||||
"""
|
||||
A simple banner with a message, defined here for styling consistency
|
||||
|
||||
:param message: The message to display
|
||||
:type message: str
|
||||
"""
|
||||
|
||||
console.rule(message)
|
||||
|
||||
|
||||
# TODO this does not yet work correctly
|
||||
def windows_long_paths_registry() -> None:
|
||||
"""
|
||||
Display a message about applying the Windows long paths registry fix
|
||||
"""
|
||||
|
||||
with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
|
||||
syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")
|
||||
|
||||
console.print(
|
||||
Panel(
|
||||
Group(
|
||||
"\n".join(
|
||||
[
|
||||
"We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
|
||||
"",
|
||||
"This is the change that will be applied:",
|
||||
str(syntax),
|
||||
]
|
||||
)
|
||||
),
|
||||
title="Windows Long Paths registry fix",
|
||||
box=box.HORIZONTALS,
|
||||
padding=(1, 1),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _platform_specific_help() -> Text | None:
|
||||
if OS == "Darwin":
|
||||
text = Text.from_markup(
|
||||
"""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
|
||||
)
|
||||
elif OS == "Windows":
|
||||
text = Text.from_markup(
|
||||
"""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
|
||||
1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
|
||||
enable long path support on your system.
|
||||
2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
|
||||
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
|
||||
)
|
||||
else:
|
||||
return
|
||||
return text
|
||||
52 installer/readme.txt Normal file
@@ -0,0 +1,52 @@
|
||||
InvokeAI
|
||||
|
||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
Preparations:
|
||||
|
||||
You will need to install Python 3.10 or higher for this installer
|
||||
to work. Instructions are given here:
|
||||
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
|
||||
Before you start the installer, please open up your system's command
|
||||
line window (Terminal or Command) and type the commands:
|
||||
|
||||
python --version
|
||||
|
||||
If all is well, it will print "Python 3.X.X", where the version number
|
||||
is at least 3.10.*, and not higher than 3.11.*.
|
||||
|
||||
If this works, check the version of the Python package manager, pip:
|
||||
|
||||
pip --version
|
||||
|
||||
You should get a message that indicates that the pip package
|
||||
installer was derived from Python 3.10 or 3.11. For example:
|
||||
"pip 22.0.1 from /usr/bin/pip (python 3.10)"
|
||||
|
||||
Long Paths on Windows:
|
||||
|
||||
If you are on Windows, you will need to enable Windows Long Paths to
|
||||
run InvokeAI successfully. If you're not sure what this is, you
|
||||
almost certainly need to do this.
|
||||
|
||||
Simply double-click the "WinLongPathsEnabled.reg" file located in
|
||||
this directory, and approve the Windows warnings. Note that you will
|
||||
need to have admin privileges in order to do this.
|
||||
|
||||
Launching the installer:
|
||||
|
||||
Windows: double-click the 'install.bat' file (while keeping it inside
|
||||
the InvokeAI-Installer folder).
|
||||
|
||||
Linux and Mac: Please open the terminal application and run
|
||||
'./install.sh' (while keeping it inside the InvokeAI-Installer
|
||||
folder).
|
||||
|
||||
The installer will create a directory of your choice and install the
|
||||
InvokeAI application within it. This directory contains everything you need to run
|
||||
invokeai. Once InvokeAI is up and running, you may delete the
|
||||
InvokeAI-Installer folder at your convenience.
|
||||
|
||||
For more information, please see
|
||||
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
54 installer/templates/invoke.bat.in Normal file
@@ -0,0 +1,54 @@
|
||||
@echo off
|
||||
|
||||
PUSHD "%~dp0"
|
||||
setlocal
|
||||
|
||||
call .venv\Scripts\activate.bat
|
||||
set INVOKEAI_ROOT=.
|
||||
|
||||
:start
|
||||
echo Desired action:
|
||||
echo 1. Generate images with the browser-based interface
|
||||
echo 2. Open the developer console
|
||||
echo 3. Command-line help
|
||||
echo Q - Quit
|
||||
echo.
|
||||
echo To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
echo.
|
||||
set /P choice="Please enter 1-3, Q: [1] "
|
||||
if not defined choice set choice=1
|
||||
IF /I "%choice%" == "1" (
|
||||
echo Starting the InvokeAI browser-based UI..
|
||||
python .venv\Scripts\invokeai-web.exe %*
|
||||
) ELSE IF /I "%choice%" == "2" (
|
||||
echo Developer Console
|
||||
echo Python command is:
|
||||
where python
|
||||
echo Python version is:
|
||||
python --version
|
||||
echo *************************
|
||||
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
|
||||
echo so that you can troubleshoot this InvokeAI installation as necessary.
|
||||
echo *************************
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) ELSE IF /I "%choice%" == "3" (
|
||||
echo Displaying command line help...
|
||||
python .venv\Scripts\invokeai-web.exe --help %*
|
||||
pause
|
||||
exit /b
|
||||
) ELSE IF /I "%choice%" == "q" (
|
||||
echo Goodbye!
|
||||
goto ending
|
||||
) ELSE (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
goto start
|
||||
|
||||
endlocal
|
||||
pause
|
||||
|
||||
:ending
|
||||
exit /b
|
||||
87 installer/templates/invoke.sh.in Normal file
@@ -0,0 +1,87 @@
|
||||
#!/bin/bash
|
||||
|
||||
# MIT License
|
||||
|
||||
# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
|
||||
# Copyright 2023, The InvokeAI Development Team
|
||||
|
||||
####
|
||||
# This launch script assumes that:
|
||||
# 1. it is located in the runtime directory,
|
||||
# 2. the .venv is also located in the runtime directory and is named exactly that
|
||||
#
|
||||
# If both of the above are not true, this script will likely not work as intended.
|
||||
# Activate the virtual environment and run `invoke.py` directly.
|
||||
####
|
||||
|
||||
set -eu
|
||||
|
||||
# Ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname $(readlink -f "$0"))
|
||||
cd "$scriptdir"
|
||||
|
||||
. .venv/bin/activate
|
||||
|
||||
export INVOKEAI_ROOT="$scriptdir"
|
||||
|
||||
# Stash the CLI args - when we prompt for user input, `$@` is overwritten
|
||||
PARAMS=$@
|
||||
|
||||
# This setting allows torch to fall back to CPU for operations that are not supported by MPS on macOS.
|
||||
if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
|
||||
# Primary function for the case statement to determine user input
|
||||
do_choice() {
|
||||
case $1 in
|
||||
1)
|
||||
clear
|
||||
printf "Generate images with a browser-based interface\n"
|
||||
invokeai-web $PARAMS
|
||||
;;
|
||||
2)
|
||||
clear
|
||||
printf "Open the developer console\n"
|
||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
3)
|
||||
clear
|
||||
printf "Command-line help\n"
|
||||
invokeai-web --help
|
||||
;;
|
||||
*)
|
||||
clear
|
||||
printf "Exiting...\n"
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
clear
|
||||
}
|
||||
|
||||
# Command-line interface for launching Invoke functions
|
||||
do_line_input() {
|
||||
clear
|
||||
printf "What would you like to do?\n"
|
||||
printf "1: Generate images using the browser-based interface\n"
|
||||
printf "2: Open the developer console\n"
|
||||
printf "3: Command-line help\n"
|
||||
printf "Q: Quit\n\n"
|
||||
printf "To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest.\n\n"
|
||||
read -p "Please enter 1-3, Q: [1] " yn
|
||||
choice=${yn:='1'}
|
||||
do_choice $choice
|
||||
clear
|
||||
}
|
||||
|
||||
# Main IF statement for launching Invoke, and for checking if the user is in the developer console
|
||||
if [ "$0" != "bash" ]; then
|
||||
while true; do
|
||||
do_line_input
|
||||
done
|
||||
else # in developer console
|
||||
python --version
|
||||
printf "Press ^D to exit\n"
|
||||
export PS1="(InvokeAI) \u@\h \w> "
|
||||
fi
|
||||
@@ -10,7 +10,6 @@ from invokeai.app.services.board_images.board_images_default import BoardImagesS
|
||||
from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
|
||||
from invokeai.app.services.boards.boards_default import BoardService
|
||||
from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
|
||||
from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import ClientStatePersistenceSqlite
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.download.download_default import DownloadQueueService
|
||||
from invokeai.app.services.events.events_fastapievents import FastAPIEventService
|
||||
@@ -24,10 +23,6 @@ from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_images.model_images_default import ModelImageFileStorageDisk
|
||||
from invokeai.app.services.model_manager.model_manager_default import ModelManagerService
|
||||
from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
|
||||
from invokeai.app.services.model_relationship_records.model_relationship_records_sqlite import (
|
||||
SqliteModelRelationshipRecordStorage,
|
||||
)
|
||||
from invokeai.app.services.model_relationships.model_relationships_default import ModelRelationshipsService
|
||||
from invokeai.app.services.names.names_default import SimpleNameService
|
||||
from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
|
||||
from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache
|
||||
@@ -41,15 +36,7 @@ from invokeai.app.services.style_preset_images.style_preset_images_disk import S
|
||||
from invokeai.app.services.style_preset_records.style_preset_records_sqlite import SqliteStylePresetRecordsStorage
|
||||
from invokeai.app.services.urls.urls_default import LocalUrlService
|
||||
from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
|
||||
from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_disk import WorkflowThumbnailFileStorageDisk
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
|
||||
BasicConditioningInfo,
|
||||
CogView4ConditioningInfo,
|
||||
ConditioningFieldData,
|
||||
FLUXConditioningInfo,
|
||||
SD3ConditioningInfo,
|
||||
SDXLConditioningInfo,
|
||||
)
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
|
||||
@@ -96,7 +83,6 @@ class ApiDependencies:
|
||||
|
||||
model_images_folder = config.models_path
|
||||
style_presets_folder = config.style_presets_path
|
||||
workflow_thumbnails_folder = config.workflow_thumbnails_path
|
||||
|
||||
db = init_db(config=config, logger=logger, image_files=image_files)
|
||||
|
||||
@@ -113,25 +99,10 @@ class ApiDependencies:
|
||||
images = ImageService()
|
||||
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
|
||||
tensors = ObjectSerializerForwardCache(
|
||||
ObjectSerializerDisk[torch.Tensor](
|
||||
output_folder / "tensors",
|
||||
safe_globals=[torch.Tensor],
|
||||
ephemeral=True,
|
||||
),
|
||||
ObjectSerializerDisk[torch.Tensor](output_folder / "tensors", ephemeral=True)
|
||||
)
|
||||
conditioning = ObjectSerializerForwardCache(
|
||||
ObjectSerializerDisk[ConditioningFieldData](
|
||||
output_folder / "conditioning",
|
||||
safe_globals=[
|
||||
ConditioningFieldData,
|
||||
BasicConditioningInfo,
|
||||
SDXLConditioningInfo,
|
||||
FLUXConditioningInfo,
|
||||
SD3ConditioningInfo,
|
||||
CogView4ConditioningInfo,
|
||||
],
|
||||
ephemeral=True,
|
||||
),
|
||||
ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True)
|
||||
)
|
||||
download_queue_service = DownloadQueueService(app_config=configuration, event_bus=events)
|
||||
model_images_service = ModelImageFileStorageDisk(model_images_folder / "model_images")
|
||||
@@ -141,8 +112,6 @@ class ApiDependencies:
|
||||
download_queue=download_queue_service,
|
||||
events=events,
|
||||
)
|
||||
model_relationships = ModelRelationshipsService()
|
||||
model_relationship_records = SqliteModelRelationshipRecordStorage(db=db)
|
||||
names = SimpleNameService()
|
||||
performance_statistics = InvocationStatsService()
|
||||
session_processor = DefaultSessionProcessor(session_runner=DefaultSessionRunner())
|
||||
@@ -151,8 +120,6 @@ class ApiDependencies:
|
||||
workflow_records = SqliteWorkflowRecordsStorage(db=db)
|
||||
style_preset_records = SqliteStylePresetRecordsStorage(db=db)
|
||||
style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
|
||||
workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)
|
||||
client_state_persistence = ClientStatePersistenceSqlite(db=db)
|
||||
|
||||
services = InvocationServices(
|
||||
board_image_records=board_image_records,
|
||||
@@ -169,8 +136,6 @@ class ApiDependencies:
|
||||
logger=logger,
|
||||
model_images=model_images_service,
|
||||
model_manager=model_manager,
|
||||
model_relationships=model_relationships,
|
||||
model_relationship_records=model_relationship_records,
|
||||
download_queue=download_queue_service,
|
||||
names=names,
|
||||
performance_statistics=performance_statistics,
|
||||
@@ -182,8 +147,6 @@ class ApiDependencies:
|
||||
conditioning=conditioning,
|
||||
style_preset_records=style_preset_records,
|
||||
style_preset_image_files=style_preset_image_files,
|
||||
workflow_thumbnails=workflow_thumbnails,
|
||||
client_state_persistence=client_state_persistence,
|
||||
)
|
||||
|
||||
ApiDependencies.invoker = Invoker(services)
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutIDValidator
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExtractedMetadata:
|
||||
invokeai_metadata: str | None
|
||||
invokeai_workflow: str | None
|
||||
invokeai_graph: str | None
|
||||
|
||||
|
||||
def extract_metadata_from_image(
|
||||
pil_image: Image.Image,
|
||||
invokeai_metadata_override: str | None,
|
||||
invokeai_workflow_override: str | None,
|
||||
invokeai_graph_override: str | None,
|
||||
logger: logging.Logger,
|
||||
) -> ExtractedMetadata:
|
||||
"""
|
||||
Extracts the "invokeai_metadata", "invokeai_workflow", and "invokeai_graph" data embedded in the PIL Image.
|
||||
|
||||
These items are stored as stringified JSON in the image file's metadata, so we need to do some parsing to validate
|
||||
them. Once parsed, the values are returned as they came (as strings), or None if they are not present or invalid.
|
||||
|
||||
In some situations, we may prefer to override the values extracted from the image file with some other values.
|
||||
|
||||
For example, when uploading an image via API, the client can optionally provide the metadata directly in the request,
|
||||
as opposed to embedding it in the image file. In this case, the client-provided metadata will be used instead of the
|
||||
metadata embedded in the image file.
|
||||
|
||||
Args:
|
||||
pil_image: The PIL Image object.
|
||||
invokeai_metadata_override: The metadata override provided by the client.
|
||||
invokeai_workflow_override: The workflow override provided by the client.
|
||||
invokeai_graph_override: The graph override provided by the client.
|
||||
logger: The logger to use for debug logging.
|
||||
|
||||
Returns:
|
||||
ExtractedMetadata: The extracted metadata, workflow, and graph.
|
||||
"""
|
||||
|
||||
# The fallback value for metadata is None.
|
||||
stringified_metadata: str | None = None
|
||||
|
||||
# Use the metadata override if provided, else attempt to extract it from the image file.
|
||||
metadata_raw = invokeai_metadata_override or pil_image.info.get("invokeai_metadata", None)
|
||||
|
||||
# If the metadata is present in the image file, we will attempt to parse it as JSON. When we create images,
|
||||
# we always store metadata as a stringified JSON dict. So, we expect it to be a string here.
|
||||
if isinstance(metadata_raw, str):
|
||||
try:
|
||||
# Must be a JSON string
|
||||
metadata_parsed = json.loads(metadata_raw)
|
||||
# Must be a dict
|
||||
if isinstance(metadata_parsed, dict):
|
||||
# Looks good, overwrite the fallback value
|
||||
stringified_metadata = metadata_raw
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to parse metadata for uploaded image, {e}")
|
||||
pass
|
||||
|
||||
# We expect the workflow, if embedded in the image, to be a JSON-stringified WorkflowWithoutID. We will store it
|
||||
# as a string.
|
||||
workflow_raw: str | None = invokeai_workflow_override or pil_image.info.get("invokeai_workflow", None)
|
||||
|
||||
# The fallback value for workflow is None.
|
||||
stringified_workflow: str | None = None
|
||||
|
||||
# If the workflow is present in the image file, we will attempt to parse it as JSON. When we create images, we
|
||||
# always store workflows as a stringified JSON WorkflowWithoutID. So, we expect it to be a string here.
|
||||
if isinstance(workflow_raw, str):
|
||||
try:
|
||||
# Validate the workflow JSON before storing it
|
||||
WorkflowWithoutIDValidator.validate_json(workflow_raw)
|
||||
# Looks good, overwrite the fallback value
|
||||
stringified_workflow = workflow_raw
|
||||
except Exception:
|
||||
logger.debug("Failed to parse workflow for uploaded image")
|
||||
pass
|
||||
|
||||
# We expect the workflow, if embedded in the image, to be a JSON-stringified Graph. We will store it as a
|
||||
# string.
|
||||
graph_raw: str | None = invokeai_graph_override or pil_image.info.get("invokeai_graph", None)
|
||||
|
||||
# The fallback value for graph is None.
|
||||
stringified_graph: str | None = None
|
||||
|
||||
# If the graph is present in the image file, we will attempt to parse it as JSON. When we create images, we
|
||||
# always store graphs as a stringified JSON Graph. So, we expect it to be a string here.
|
||||
if isinstance(graph_raw, str):
|
||||
try:
|
||||
# TODO(psyche): Due to pydantic's handling of None values, it is possible for the graph to fail validation,
|
||||
# even if it is a direct dump of a valid graph. Node fields in the graph are allowed to have be unset if
|
||||
# they have incoming connections, but something about the ser/de process cannot adequately handle this.
|
||||
#
|
||||
# In lieu of fixing the graph validation, we will just do a simple check here to see if the graph is dict
|
||||
# with the correct keys. This is not a perfect solution, but it should be good enough for now.
|
||||
|
||||
# FIX ME: Validate the graph JSON before storing it
|
||||
# Graph.model_validate_json(graph_raw)
|
||||
|
||||
# Crappy workaround to validate JSON
|
||||
graph_parsed = json.loads(graph_raw)
|
||||
if not isinstance(graph_parsed, dict):
|
||||
raise ValueError("Not a dict")
|
||||
if not isinstance(graph_parsed.get("nodes", None), dict):
|
||||
raise ValueError("'nodes' is not a dict")
|
||||
if not isinstance(graph_parsed.get("edges", None), list):
|
||||
raise ValueError("'edges' is not a list")
|
||||
|
||||
# Looks good, overwrite the fallback value
|
||||
stringified_graph = graph_raw
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to parse graph for uploaded image, {e}")
|
||||
pass
|
||||
|
||||
return ExtractedMetadata(
|
||||
invokeai_metadata=stringified_metadata, invokeai_workflow=stringified_workflow, invokeai_graph=stringified_graph
|
||||
)
|
||||
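The removed metadata extractor above repeats one pattern three times: accept an embedded value only if it is a string that parses as the expected JSON shape, otherwise fall back to `None`. A minimal standalone sketch of that pattern (illustrative only, using a hypothetical helper name):

```python
import json

# Hypothetical helper illustrating the validate-or-fall-back pattern used for
# "invokeai_metadata": keep the raw string only if it parses to a JSON object.
def validated_stringified_dict(raw: object) -> str | None:
    if not isinstance(raw, str):
        return None
    try:
        parsed = json.loads(raw)
    except json.JSONDecodeError:
        return None
    return raw if isinstance(parsed, dict) else None

assert validated_stringified_dict('{"seed": 123}') == '{"seed": 123}'
assert validated_stringified_dict("[1, 2, 3]") is None
assert validated_stringified_dict(None) is None
```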
@@ -1,7 +1,8 @@
|
||||
import typing
|
||||
from enum import Enum
|
||||
from importlib.metadata import distributions
|
||||
from importlib.metadata import PackageNotFoundError, version
|
||||
from pathlib import Path
|
||||
from platform import python_version
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
@@ -11,7 +12,6 @@ from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.invocations.upscale import ESRGAN_MODELS
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
|
||||
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
|
||||
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
|
||||
from invokeai.backend.util.logging import logging
|
||||
@@ -40,7 +40,23 @@ class AppVersion(BaseModel):
|
||||
|
||||
version: str = Field(description="App version")
|
||||
|
||||
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
|
||||
|
||||
class AppDependencyVersions(BaseModel):
|
||||
"""App depencency Versions Response"""
|
||||
|
||||
accelerate: str = Field(description="accelerate version")
|
||||
compel: str = Field(description="compel version")
|
||||
cuda: Optional[str] = Field(description="CUDA version")
|
||||
diffusers: str = Field(description="diffusers version")
|
||||
numpy: str = Field(description="Numpy version")
|
||||
opencv: str = Field(description="OpenCV version")
|
||||
onnx: str = Field(description="ONNX version")
|
||||
pillow: str = Field(description="Pillow (PIL) version")
|
||||
python: str = Field(description="Python version")
|
||||
torch: str = Field(description="PyTorch version")
|
||||
torchvision: str = Field(description="PyTorch Vision version")
|
||||
transformers: str = Field(description="transformers version")
|
||||
xformers: Optional[str] = Field(description="xformers version")
|
||||
|
||||
|
||||
class AppConfig(BaseModel):
|
||||
@@ -57,23 +73,31 @@ async def get_version() -> AppVersion:
|
||||
return AppVersion(version=__version__)
|
||||
|
||||
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
|
||||
async def get_app_deps() -> dict[str, str]:
|
||||
deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
|
||||
async def get_app_deps() -> AppDependencyVersions:
|
||||
try:
|
||||
cuda = torch.version.cuda or "N/A"
|
||||
except Exception:
|
||||
cuda = "N/A"
|
||||
|
||||
deps["CUDA"] = cuda
|
||||
|
||||
sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
|
||||
|
||||
return sorted_deps
|
||||
xformers = version("xformers")
|
||||
except PackageNotFoundError:
|
||||
xformers = None
|
||||
return AppDependencyVersions(
|
||||
accelerate=version("accelerate"),
|
||||
compel=version("compel"),
|
||||
cuda=torch.version.cuda,
|
||||
diffusers=version("diffusers"),
|
||||
numpy=version("numpy"),
|
||||
opencv=version("opencv-python"),
|
||||
onnx=version("onnx"),
|
||||
pillow=version("pillow"),
|
||||
python=python_version(),
|
||||
torch=torch.version.__version__,
|
||||
torchvision=version("torchvision"),
|
||||
transformers=version("transformers"),
|
||||
xformers=xformers,
|
||||
)
|
||||
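The replacement endpoint above reports package versions via importlib.metadata.version and treats PackageNotFoundError as "optional dependency not installed", as it does for xformers. A minimal sketch of that lookup pattern, with an illustrative package list rather than the full set the route returns:

from importlib.metadata import PackageNotFoundError, version


def optional_version(package_name: str) -> str | None:
    """Return the installed version of a package, or None if it is not installed."""
    try:
        return version(package_name)
    except PackageNotFoundError:
        return None


# e.g. {'numpy': '1.26.4', 'xformers': None} on a machine without xformers installed
versions = {name: optional_version(name) for name in ("numpy", "xformers")}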
|
||||
|
||||
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
|
||||
async def get_config_() -> AppConfig:
|
||||
async def get_config() -> AppConfig:
|
||||
infill_methods = ["lama", "tile", "cv2", "color"] # TODO: add mosaic back
|
||||
if PatchMatch.patchmatch_available():
|
||||
infill_methods.append("patchmatch")
|
||||
@@ -95,21 +119,6 @@ async def get_config_() -> AppConfig:
|
||||
)
|
||||
|
||||
|
||||
class InvokeAIAppConfigWithSetFields(BaseModel):
|
||||
"""InvokeAI App Config with model fields set"""
|
||||
|
||||
set_fields: set[str] = Field(description="The set fields")
|
||||
config: InvokeAIAppConfig = Field(description="The InvokeAI App Config")
|
||||
|
||||
|
||||
@app_router.get(
|
||||
"/runtime_config", operation_id="get_runtime_config", status_code=200, response_model=InvokeAIAppConfigWithSetFields
|
||||
)
|
||||
async def get_runtime_config() -> InvokeAIAppConfigWithSetFields:
|
||||
config = get_config()
|
||||
return InvokeAIAppConfigWithSetFields(set_fields=config.model_fields_set, config=config)
|
||||
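The new /runtime_config route pairs the config with config.model_fields_set, Pydantic v2's record of which fields were explicitly provided rather than left at their defaults. A small standalone illustration (assumes Pydantic v2; the field names are illustrative):

from pydantic import BaseModel


class ExampleConfig(BaseModel):
    ram: float = 8.0
    vram: float = 2.0


config = ExampleConfig(ram=16.0)
print(config.model_fields_set)  # {'ram'} -- 'vram' was left at its default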
|
||||
|
||||
@app_router.get(
|
||||
"/logging",
|
||||
operation_id="get_log_level",
|
||||
|
||||
@@ -1,12 +1,21 @@
|
||||
from fastapi import Body, HTTPException
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.images.images_common import AddImagesToBoardResult, RemoveImagesFromBoardResult
|
||||
|
||||
board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
|
||||
|
||||
|
||||
class AddImagesToBoardResult(BaseModel):
|
||||
board_id: str = Field(description="The id of the board the images were added to")
|
||||
added_image_names: list[str] = Field(description="The image names that were added to the board")
|
||||
|
||||
|
||||
class RemoveImagesFromBoardResult(BaseModel):
|
||||
removed_image_names: list[str] = Field(description="The image names that were removed from their board")
|
||||
|
||||
|
||||
@board_images_router.post(
|
||||
"/",
|
||||
operation_id="add_image_to_board",
|
||||
@@ -14,26 +23,17 @@ board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
|
||||
201: {"description": "The image was added to a board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=AddImagesToBoardResult,
|
||||
)
|
||||
async def add_image_to_board(
|
||||
board_id: str = Body(description="The id of the board to add to"),
|
||||
image_name: str = Body(description="The name of the image to add"),
|
||||
) -> AddImagesToBoardResult:
|
||||
):
|
||||
"""Creates a board_image"""
|
||||
try:
|
||||
added_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.add_image_to_board(board_id=board_id, image_name=image_name)
|
||||
added_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
affected_boards.add(old_board_id)
|
||||
|
||||
return AddImagesToBoardResult(
|
||||
added_images=list(added_images),
|
||||
affected_boards=list(affected_boards),
|
||||
result = ApiDependencies.invoker.services.board_images.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
return result
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to add image to board")
|
||||
|
||||
@@ -45,25 +45,14 @@ async def add_image_to_board(
|
||||
201: {"description": "The image was removed from the board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=RemoveImagesFromBoardResult,
|
||||
)
|
||||
async def remove_image_from_board(
|
||||
image_name: str = Body(description="The name of the image to remove", embed=True),
|
||||
) -> RemoveImagesFromBoardResult:
|
||||
):
|
||||
"""Removes an image from its board, if it had one"""
|
||||
try:
|
||||
removed_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
removed_images.add(image_name)
|
||||
affected_boards.add("none")
|
||||
affected_boards.add(old_board_id)
|
||||
return RemoveImagesFromBoardResult(
|
||||
removed_images=list(removed_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
|
||||
result = ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
return result
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to remove image from board")
|
||||
|
||||
@@ -83,25 +72,16 @@ async def add_images_to_board(
|
||||
) -> AddImagesToBoardResult:
|
||||
"""Adds a list of images to a board"""
|
||||
try:
|
||||
added_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
added_image_names: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.add_image_to_board(
|
||||
board_id=board_id,
|
||||
image_name=image_name,
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
added_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
affected_boards.add(old_board_id)
|
||||
|
||||
added_image_names.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return AddImagesToBoardResult(
|
||||
added_images=list(added_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return AddImagesToBoardResult(board_id=board_id, added_image_names=added_image_names)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to add images to board")
|
||||
|
||||
@@ -120,20 +100,13 @@ async def remove_images_from_board(
|
||||
) -> RemoveImagesFromBoardResult:
|
||||
"""Removes a list of images from their board, if they had one"""
|
||||
try:
|
||||
removed_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
removed_image_names: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
removed_images.add(image_name)
|
||||
affected_boards.add("none")
|
||||
affected_boards.add(old_board_id)
|
||||
removed_image_names.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return RemoveImagesFromBoardResult(
|
||||
removed_images=list(removed_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return RemoveImagesFromBoardResult(removed_image_names=removed_image_names)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to remove images from board")
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
from fastapi import Body, HTTPException
|
||||
from fastapi.routing import APIRouter
|
||||
|
||||
from invokeai.app.services.videos_common import AddVideosToBoardResult, RemoveVideosFromBoardResult
|
||||
|
||||
board_videos_router = APIRouter(prefix="/v1/board_videos", tags=["boards"])
|
||||
|
||||
|
||||
@board_videos_router.post(
|
||||
"/batch",
|
||||
operation_id="add_videos_to_board",
|
||||
responses={
|
||||
201: {"description": "Videos were added to board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=AddVideosToBoardResult,
|
||||
)
|
||||
async def add_videos_to_board(
|
||||
board_id: str = Body(description="The id of the board to add to"),
|
||||
video_ids: list[str] = Body(description="The ids of the videos to add", embed=True),
|
||||
) -> AddVideosToBoardResult:
|
||||
"""Adds a list of videos to a board"""
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@board_videos_router.post(
|
||||
"/batch/delete",
|
||||
operation_id="remove_videos_from_board",
|
||||
responses={
|
||||
201: {"description": "Videos were removed from board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=RemoveVideosFromBoardResult,
|
||||
)
|
||||
async def remove_videos_from_board(
|
||||
video_ids: list[str] = Body(description="The ids of the videos to remove", embed=True),
|
||||
) -> RemoveVideosFromBoardResult:
|
||||
"""Removes a list of videos from their board, if they had one"""
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
@@ -5,11 +5,9 @@ from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
|
||||
from invokeai.app.services.board_records.board_records_common import BoardChanges
|
||||
from invokeai.app.services.boards.boards_common import BoardDTO
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
|
||||
boards_router = APIRouter(prefix="/v1/boards", tags=["boards"])
|
||||
|
||||
@@ -32,7 +30,7 @@ class DeleteBoardResult(BaseModel):
|
||||
response_model=BoardDTO,
|
||||
)
|
||||
async def create_board(
|
||||
board_name: str = Query(description="The name of the board to create", max_length=300),
|
||||
board_name: str = Query(description="The name of the board to create"),
|
||||
is_private: bool = Query(default=False, description="Whether the board is private"),
|
||||
) -> BoardDTO:
|
||||
"""Creates a board"""
|
||||
@@ -88,9 +86,7 @@ async def delete_board(
|
||||
try:
|
||||
if include_images is True:
|
||||
deleted_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id=board_id,
|
||||
categories=None,
|
||||
is_intermediate=None,
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.images.delete_images_on_board(board_id=board_id)
|
||||
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
|
||||
@@ -101,9 +97,7 @@ async def delete_board(
|
||||
)
|
||||
else:
|
||||
deleted_board_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id=board_id,
|
||||
categories=None,
|
||||
is_intermediate=None,
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
|
||||
return DeleteBoardResult(
|
||||
@@ -121,8 +115,6 @@ async def delete_board(
|
||||
response_model=Union[OffsetPaginatedResults[BoardDTO], list[BoardDTO]],
|
||||
)
|
||||
async def list_boards(
|
||||
order_by: BoardRecordOrderBy = Query(default=BoardRecordOrderBy.CreatedAt, description="The attribute to order by"),
|
||||
direction: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The direction to order by"),
|
||||
all: Optional[bool] = Query(default=None, description="Whether to list all boards"),
|
||||
offset: Optional[int] = Query(default=None, description="The page offset"),
|
||||
limit: Optional[int] = Query(default=None, description="The number of boards per page"),
|
||||
@@ -130,9 +122,9 @@ async def list_boards(
|
||||
) -> Union[OffsetPaginatedResults[BoardDTO], list[BoardDTO]]:
|
||||
"""Gets a list of boards"""
|
||||
if all:
|
||||
return ApiDependencies.invoker.services.boards.get_all(order_by, direction, include_archived)
|
||||
return ApiDependencies.invoker.services.boards.get_all(include_archived)
|
||||
elif offset is not None and limit is not None:
|
||||
return ApiDependencies.invoker.services.boards.get_many(order_by, direction, offset, limit, include_archived)
|
||||
return ApiDependencies.invoker.services.boards.get_many(offset, limit, include_archived)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
@@ -146,15 +138,11 @@ async def list_boards(
|
||||
response_model=list[str],
|
||||
)
|
||||
async def list_all_board_image_names(
|
||||
board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
|
||||
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
|
||||
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
|
||||
board_id: str = Path(description="The id of the board"),
|
||||
) -> list[str]:
|
||||
"""Gets a list of images for a board"""
|
||||
|
||||
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id,
|
||||
categories,
|
||||
is_intermediate,
|
||||
)
|
||||
return image_names
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.backend.util.logging import logging
|
||||
|
||||
client_state_router = APIRouter(prefix="/v1/client_state", tags=["client_state"])
|
||||
|
||||
|
||||
@client_state_router.get(
|
||||
"/{queue_id}/get_by_key",
|
||||
operation_id="get_client_state_by_key",
|
||||
response_model=str | None,
|
||||
)
|
||||
async def get_client_state_by_key(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
key: str = Query(..., description="Key to get"),
|
||||
) -> str | None:
|
||||
"""Gets the client state"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.client_state_persistence.get_by_key(queue_id, key)
|
||||
except Exception as e:
|
||||
logging.error(f"Error getting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error setting client state")
|
||||
|
||||
|
||||
@client_state_router.post(
|
||||
"/{queue_id}/set_by_key",
|
||||
operation_id="set_client_state",
|
||||
response_model=str,
|
||||
)
|
||||
async def set_client_state(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
key: str = Query(..., description="Key to set"),
|
||||
value: str = Body(..., description="Stringified value to set"),
|
||||
) -> str:
|
||||
"""Sets the client state"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.client_state_persistence.set_by_key(queue_id, key, value)
|
||||
except Exception as e:
|
||||
logging.error(f"Error setting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error setting client state")
|
||||
|
||||
|
||||
@client_state_router.post(
|
||||
"/{queue_id}/delete",
|
||||
operation_id="delete_client_state",
|
||||
responses={204: {"description": "Client state deleted"}},
|
||||
)
|
||||
async def delete_client_state(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> None:
|
||||
"""Deletes the client state"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.client_state_persistence.delete(queue_id)
|
||||
except Exception as e:
|
||||
logging.error(f"Error deleting client state: {e}")
|
||||
raise HTTPException(status_code=500, detail="Error deleting client state")
|
||||
@@ -1,34 +1,23 @@
|
||||
import io
|
||||
import json
|
||||
import traceback
|
||||
from typing import ClassVar, Optional
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
from pydantic import BaseModel, Field, JsonValue
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.image_records.image_records_common import (
|
||||
ImageCategory,
|
||||
ImageNamesResult,
|
||||
ImageRecordChanges,
|
||||
ResourceOrigin,
|
||||
)
|
||||
from invokeai.app.services.images.images_common import (
|
||||
DeleteImagesResult,
|
||||
ImageDTO,
|
||||
ImageUrlsDTO,
|
||||
StarredImagesResult,
|
||||
UnstarredImagesResult,
|
||||
)
|
||||
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
|
||||
@@ -37,19 +26,6 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
IMAGE_MAX_AGE = 31536000
|
||||
|
||||
|
||||
class ResizeToDimensions(BaseModel):
|
||||
width: int = Field(..., gt=0)
|
||||
height: int = Field(..., gt=0)
|
||||
|
||||
MAX_SIZE: ClassVar[int] = 4096 * 4096
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_total_output_size(self):
|
||||
if self.width * self.height > self.MAX_SIZE:
|
||||
raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
|
||||
return self
|
||||
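ResizeToDimensions caps the total output pixel count with an after-mode model validator. A minimal standalone sketch of the same pattern (a separate toy class, assuming Pydantic v2), showing an oversized request failing validation while a typical one passes:

from typing import ClassVar

from pydantic import BaseModel, Field, ValidationError, model_validator


class Dimensions(BaseModel):
    width: int = Field(..., gt=0)
    height: int = Field(..., gt=0)

    MAX_SIZE: ClassVar[int] = 4096 * 4096  # same cap as the route's ResizeToDimensions

    @model_validator(mode="after")
    def check_total_size(self) -> "Dimensions":
        if self.width * self.height > self.MAX_SIZE:
            raise ValueError(f"Max total output size is {self.MAX_SIZE} pixels")
        return self


Dimensions(width=1024, height=1024)  # ok: 1,048,576 pixels is under the cap
try:
    Dimensions(width=8192, height=8192)  # 67,108,864 pixels -> rejected
except ValidationError:
    pass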
|
||||
|
||||
@images_router.post(
|
||||
"/upload",
|
||||
operation_id="upload_image",
|
||||
@@ -69,58 +45,52 @@ async def upload_image(
|
||||
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
|
||||
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
|
||||
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
|
||||
resize_to: Optional[str] = Body(
|
||||
default=None,
|
||||
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
|
||||
examples=['"[1024,1024]"'],
|
||||
),
|
||||
metadata: Optional[str] = Body(
|
||||
default=None,
|
||||
description="The metadata to associate with the image, must be a stringified JSON dict",
|
||||
embed=True,
|
||||
metadata: Optional[JsonValue] = Body(
|
||||
default=None, description="The metadata to associate with the image", embed=True
|
||||
),
|
||||
) -> ImageDTO:
|
||||
"""Uploads an image"""
|
||||
if not file.content_type or not file.content_type.startswith("image"):
|
||||
raise HTTPException(status_code=415, detail="Not an image")
|
||||
|
||||
_metadata = None
|
||||
_workflow = None
|
||||
_graph = None
|
||||
|
||||
contents = await file.read()
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
if crop_visible:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
|
||||
if crop_visible:
|
||||
try:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to crop image")
|
||||
# TODO: retain non-invokeai metadata on upload?
|
||||
# attempt to parse metadata from image
|
||||
metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
|
||||
if isinstance(metadata_raw, str):
|
||||
_metadata = metadata_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
|
||||
if resize_to:
|
||||
try:
|
||||
dims = json.loads(resize_to)
|
||||
resize_dims = ResizeToDimensions(**dims)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
|
||||
# attempt to parse workflow from image
|
||||
workflow_raw = pil_image.info.get("invokeai_workflow", None)
|
||||
if isinstance(workflow_raw, str):
|
||||
_workflow = workflow_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse workflow for uploaded image")
|
||||
pass
|
||||
|
||||
try:
|
||||
# heuristic_resize_fast expects an RGB or RGBA image
|
||||
pil_rgba = pil_image.convert("RGBA")
|
||||
np_image = pil_to_np(pil_rgba)
|
||||
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
|
||||
pil_image = np_to_pil(np_image)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to resize image")
|
||||
|
||||
extracted_metadata = extract_metadata_from_image(
|
||||
pil_image=pil_image,
|
||||
invokeai_metadata_override=metadata,
|
||||
invokeai_workflow_override=None,
|
||||
invokeai_graph_override=None,
|
||||
logger=ApiDependencies.invoker.services.logger,
|
||||
)
|
||||
# attempt to extract graph from image
|
||||
graph_raw = pil_image.info.get("invokeai_graph", None)
|
||||
if isinstance(graph_raw, str):
|
||||
_graph = graph_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse graph for uploaded image")
|
||||
pass
|
||||
|
||||
try:
|
||||
image_dto = ApiDependencies.invoker.services.images.create(
|
||||
@@ -129,9 +99,9 @@ async def upload_image(
|
||||
image_category=image_category,
|
||||
session_id=session_id,
|
||||
board_id=board_id,
|
||||
metadata=extracted_metadata.invokeai_metadata,
|
||||
workflow=extracted_metadata.invokeai_workflow,
|
||||
graph=extracted_metadata.invokeai_graph,
|
||||
metadata=_metadata,
|
||||
workflow=_workflow,
|
||||
graph=_graph,
|
||||
is_intermediate=is_intermediate,
|
||||
)
|
||||
|
||||
@@ -144,46 +114,18 @@ async def upload_image(
|
||||
raise HTTPException(status_code=500, detail="Failed to create image")
|
||||
|
||||
|
||||
class ImageUploadEntry(BaseModel):
|
||||
image_dto: ImageDTO = Body(description="The image DTO")
|
||||
presigned_url: str = Body(description="The URL to get the presigned URL for the image upload")
|
||||
|
||||
|
||||
@images_router.post("/", operation_id="create_image_upload_entry")
|
||||
async def create_image_upload_entry(
|
||||
width: int = Body(description="The width of the image"),
|
||||
height: int = Body(description="The height of the image"),
|
||||
board_id: Optional[str] = Body(default=None, description="The board to add this image to, if any"),
|
||||
) -> ImageUploadEntry:
|
||||
"""Uploads an image from a URL, not implemented"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@images_router.delete("/i/{image_name}", operation_id="delete_image", response_model=DeleteImagesResult)
|
||||
@images_router.delete("/i/{image_name}", operation_id="delete_image")
|
||||
async def delete_image(
|
||||
image_name: str = Path(description="The name of the image to delete"),
|
||||
) -> DeleteImagesResult:
|
||||
) -> None:
|
||||
"""Deletes an image"""
|
||||
|
||||
deleted_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
|
||||
try:
|
||||
image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
|
||||
board_id = image_dto.board_id or "none"
|
||||
ApiDependencies.invoker.services.images.delete(image_name)
|
||||
deleted_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
except Exception:
|
||||
# TODO: Does this need any exception handling at all?
|
||||
pass
|
||||
|
||||
return DeleteImagesResult(
|
||||
deleted_images=list(deleted_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
|
||||
|
||||
@images_router.delete("/intermediates", operation_id="clear_intermediates")
|
||||
async def clear_intermediates() -> int:
|
||||
@@ -395,52 +337,23 @@ async def list_image_dtos(
|
||||
return image_dtos
|
||||
|
||||
|
||||
@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesResult)
|
||||
class DeleteImagesFromListResult(BaseModel):
|
||||
deleted_images: list[str]
|
||||
|
||||
|
||||
@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesFromListResult)
|
||||
async def delete_images_from_list(
|
||||
image_names: list[str] = Body(description="The list of names of images to delete", embed=True),
|
||||
) -> DeleteImagesResult:
|
||||
) -> DeleteImagesFromListResult:
|
||||
try:
|
||||
deleted_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
for image_name in image_names:
|
||||
try:
|
||||
image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
|
||||
board_id = image_dto.board_id or "none"
|
||||
ApiDependencies.invoker.services.images.delete(image_name)
|
||||
deleted_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
except Exception:
|
||||
pass
|
||||
return DeleteImagesResult(
|
||||
deleted_images=list(deleted_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
|
||||
@images_router.delete("/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesResult)
|
||||
async def delete_uncategorized_images() -> DeleteImagesResult:
|
||||
"""Deletes all images that are uncategorized"""
|
||||
|
||||
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id="none", categories=None, is_intermediate=None
|
||||
)
|
||||
|
||||
try:
|
||||
deleted_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
deleted_images: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
ApiDependencies.invoker.services.images.delete(image_name)
|
||||
deleted_images.add(image_name)
|
||||
affected_boards.add("none")
|
||||
deleted_images.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return DeleteImagesResult(
|
||||
deleted_images=list(deleted_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return DeleteImagesFromListResult(deleted_images=deleted_images)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
@@ -449,50 +362,36 @@ class ImagesUpdatedFromListResult(BaseModel):
|
||||
updated_image_names: list[str] = Field(description="The image names that were updated")
|
||||
|
||||
|
||||
@images_router.post("/star", operation_id="star_images_in_list", response_model=StarredImagesResult)
|
||||
@images_router.post("/star", operation_id="star_images_in_list", response_model=ImagesUpdatedFromListResult)
|
||||
async def star_images_in_list(
|
||||
image_names: list[str] = Body(description="The list of names of images to star", embed=True),
|
||||
) -> StarredImagesResult:
|
||||
) -> ImagesUpdatedFromListResult:
|
||||
try:
|
||||
starred_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
updated_image_names: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
updated_image_dto = ApiDependencies.invoker.services.images.update(
|
||||
image_name, changes=ImageRecordChanges(starred=True)
|
||||
)
|
||||
starred_images.add(image_name)
|
||||
affected_boards.add(updated_image_dto.board_id or "none")
|
||||
ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=True))
|
||||
updated_image_names.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return StarredImagesResult(
|
||||
starred_images=list(starred_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to star images")
|
||||
|
||||
|
||||
@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=UnstarredImagesResult)
|
||||
@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=ImagesUpdatedFromListResult)
|
||||
async def unstar_images_in_list(
|
||||
image_names: list[str] = Body(description="The list of names of images to unstar", embed=True),
|
||||
) -> UnstarredImagesResult:
|
||||
) -> ImagesUpdatedFromListResult:
|
||||
try:
|
||||
unstarred_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
updated_image_names: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
updated_image_dto = ApiDependencies.invoker.services.images.update(
|
||||
image_name, changes=ImageRecordChanges(starred=False)
|
||||
)
|
||||
unstarred_images.add(image_name)
|
||||
affected_boards.add(updated_image_dto.board_id or "none")
|
||||
ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=False))
|
||||
updated_image_names.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return UnstarredImagesResult(
|
||||
unstarred_images=list(unstarred_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to unstar images")
|
||||
|
||||
@@ -563,61 +462,3 @@ async def get_bulk_download_item(
|
||||
return response
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
@images_router.get("/names", operation_id="get_image_names")
|
||||
async def get_image_names(
|
||||
image_origin: Optional[ResourceOrigin] = Query(default=None, description="The origin of images to list."),
|
||||
categories: Optional[list[ImageCategory]] = Query(default=None, description="The categories of image to include."),
|
||||
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate images."),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None,
|
||||
description="The board id to filter by. Use 'none' to find images without a board.",
|
||||
),
|
||||
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
|
||||
starred_first: bool = Query(default=True, description="Whether to sort by starred images first"),
|
||||
search_term: Optional[str] = Query(default=None, description="The term to search for"),
|
||||
) -> ImageNamesResult:
|
||||
"""Gets ordered list of image names with metadata for optimistic updates"""
|
||||
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.images.get_image_names(
|
||||
starred_first=starred_first,
|
||||
order_dir=order_dir,
|
||||
image_origin=image_origin,
|
||||
categories=categories,
|
||||
is_intermediate=is_intermediate,
|
||||
board_id=board_id,
|
||||
search_term=search_term,
|
||||
)
|
||||
return result
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to get image names")
|
||||
|
||||
|
||||
@images_router.post(
|
||||
"/images_by_names",
|
||||
operation_id="get_images_by_names",
|
||||
responses={200: {"model": list[ImageDTO]}},
|
||||
)
|
||||
async def get_images_by_names(
|
||||
image_names: list[str] = Body(embed=True, description="Object containing list of image names to fetch DTOs for"),
|
||||
) -> list[ImageDTO]:
|
||||
"""Gets image DTOs for the specified image names. Maintains order of input names."""
|
||||
|
||||
try:
|
||||
image_service = ApiDependencies.invoker.services.images
|
||||
|
||||
# Fetch DTOs preserving the order of requested names
|
||||
image_dtos: list[ImageDTO] = []
|
||||
for name in image_names:
|
||||
try:
|
||||
dto = image_service.get_dto(name)
|
||||
image_dtos.append(dto)
|
||||
except Exception:
|
||||
# Skip missing images - they may have been deleted between name fetch and DTO fetch
|
||||
continue
|
||||
|
||||
return image_dtos
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to get image DTOs")
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein
|
||||
"""FastAPI route for model configuration records."""
|
||||
|
||||
import contextlib
|
||||
import io
|
||||
import pathlib
|
||||
import shutil
|
||||
import traceback
|
||||
from copy import deepcopy
|
||||
from enum import Enum
|
||||
from tempfile import TemporaryDirectory
|
||||
from typing import List, Optional, Type
|
||||
|
||||
import huggingface_hub
|
||||
from fastapi import Body, Path, Query, Response, UploadFile
|
||||
from fastapi.responses import FileResponse, HTMLResponse
|
||||
from fastapi.routing import APIRouter
|
||||
@@ -20,6 +19,7 @@ from starlette.exceptions import HTTPException
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.config import get_config
|
||||
from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
|
||||
from invokeai.app.services.model_install.model_install_common import ModelInstallJob
|
||||
from invokeai.app.services.model_records import (
|
||||
@@ -27,23 +27,18 @@ from invokeai.app.services.model_records import (
|
||||
ModelRecordChanges,
|
||||
UnknownModelException,
|
||||
)
|
||||
from invokeai.app.util.suppress_output import SuppressOutput
|
||||
from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
MainCheckpointConfig,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats
|
||||
from invokeai.backend.model_manager.load.model_cache.model_cache_base import CacheStats
|
||||
from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
|
||||
from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
|
||||
from invokeai.backend.model_manager.search import ModelSearch
|
||||
from invokeai.backend.model_manager.starter_models import (
|
||||
STARTER_BUNDLES,
|
||||
STARTER_MODELS,
|
||||
StarterModel,
|
||||
StarterModelBundle,
|
||||
StarterModelWithoutDependencies,
|
||||
)
|
||||
from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel, StarterModelWithoutDependencies
|
||||
|
||||
model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])
|
||||
|
||||
@@ -86,7 +81,6 @@ example_model_config = {
|
||||
"config_path": "string",
|
||||
"key": "string",
|
||||
"hash": "string",
|
||||
"file_size": 1,
|
||||
"description": "string",
|
||||
"source": "string",
|
||||
"converted_at": 0,
|
||||
@@ -292,7 +286,7 @@ async def get_hugging_face_models(
|
||||
)
|
||||
async def update_model_record(
|
||||
key: Annotated[str, Path(description="Unique key of model")],
|
||||
changes: Annotated[ModelRecordChanges, Body(description="Model config", examples=[example_model_input])],
|
||||
changes: Annotated[ModelRecordChanges, Body(description="Model config", example=example_model_input)],
|
||||
) -> AnyModelConfig:
|
||||
"""Update a model's config."""
|
||||
logger = ApiDependencies.invoker.services.logger
|
||||
@@ -450,7 +444,7 @@ async def install_model(
|
||||
access_token: Optional[str] = Query(description="access token for the remote resource", default=None),
|
||||
config: ModelRecordChanges = Body(
|
||||
description="Object containing fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
|
||||
examples=[{"name": "string", "description": "string"}],
|
||||
example={"name": "string", "description": "string"},
|
||||
),
|
||||
) -> ModelInstallJob:
|
||||
"""Install a model using a string identifier.
|
||||
@@ -798,52 +792,90 @@ async def convert_model(
|
||||
return new_config
|
||||
|
||||
|
||||
class StarterModelResponse(BaseModel):
|
||||
starter_models: list[StarterModel]
|
||||
starter_bundles: dict[str, StarterModelBundle]
|
||||
|
||||
|
||||
def get_is_installed(
|
||||
starter_model: StarterModel | StarterModelWithoutDependencies, installed_models: list[AnyModelConfig]
|
||||
) -> bool:
|
||||
for model in installed_models:
|
||||
if model.source == starter_model.source:
|
||||
return True
|
||||
if (
|
||||
(model.name == starter_model.name or model.name in starter_model.previous_names)
|
||||
and model.base == starter_model.base
|
||||
and model.type == starter_model.type
|
||||
):
|
||||
return True
|
||||
return False
|
||||
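get_is_installed treats a starter model as installed when an installed config shares its source, or matches on name (including previous names) plus base plus type. A toy illustration of that matching rule with hypothetical records, independent of the real config classes:

from dataclasses import dataclass, field


@dataclass
class Installed:
    source: str
    name: str
    base: str
    type: str


@dataclass
class Starter:
    source: str
    name: str
    base: str
    type: str
    previous_names: list[str] = field(default_factory=list)


def is_installed(starter: Starter, installed: list[Installed]) -> bool:
    for m in installed:
        if m.source == starter.source:
            return True
        if (
            (m.name == starter.name or m.name in starter.previous_names)
            and m.base == starter.base
            and m.type == starter.type
        ):
            return True
    return False


# Matched by a previous name despite a different source.
old = Installed(source="local/sd15.safetensors", name="sd-1.5", base="sd-1", type="main")
starter = Starter(
    source="hf://runwayml/sd-1.5",
    name="stable-diffusion-1.5",
    base="sd-1",
    type="main",
    previous_names=["sd-1.5"],
)
assert is_installed(starter, [old])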
|
||||
|
||||
@model_manager_router.get("/starter_models", operation_id="get_starter_models", response_model=StarterModelResponse)
|
||||
async def get_starter_models() -> StarterModelResponse:
|
||||
@model_manager_router.get("/starter_models", operation_id="get_starter_models", response_model=list[StarterModel])
|
||||
async def get_starter_models() -> list[StarterModel]:
|
||||
installed_models = ApiDependencies.invoker.services.model_manager.store.search_by_attr()
|
||||
installed_model_sources = {m.source for m in installed_models}
|
||||
starter_models = deepcopy(STARTER_MODELS)
|
||||
starter_bundles = deepcopy(STARTER_BUNDLES)
|
||||
for model in starter_models:
|
||||
model.is_installed = get_is_installed(model, installed_models)
|
||||
if model.source in installed_model_sources:
|
||||
model.is_installed = True
|
||||
# Remove already-installed dependencies
|
||||
missing_deps: list[StarterModelWithoutDependencies] = []
|
||||
|
||||
for dep in model.dependencies or []:
|
||||
if not get_is_installed(dep, installed_models):
|
||||
if dep.source not in installed_model_sources:
|
||||
missing_deps.append(dep)
|
||||
model.dependencies = missing_deps
|
||||
|
||||
for bundle in starter_bundles.values():
|
||||
for model in bundle.models:
|
||||
model.is_installed = get_is_installed(model, installed_models)
|
||||
# Remove already-installed dependencies
|
||||
missing_deps: list[StarterModelWithoutDependencies] = []
|
||||
for dep in model.dependencies or []:
|
||||
if not get_is_installed(dep, installed_models):
|
||||
missing_deps.append(dep)
|
||||
model.dependencies = missing_deps
|
||||
return starter_models
|
||||
|
||||
return StarterModelResponse(starter_models=starter_models, starter_bundles=starter_bundles)
|
||||
|
||||
@model_manager_router.get(
|
||||
"/model_cache",
|
||||
operation_id="get_cache_size",
|
||||
response_model=float,
|
||||
summary="Get maximum size of model manager RAM or VRAM cache.",
|
||||
)
|
||||
async def get_cache_size(cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM)) -> float:
|
||||
"""Return the current RAM or VRAM cache size setting (in GB)."""
|
||||
cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
|
||||
value = 0.0
|
||||
if cache_type == CacheType.RAM:
|
||||
value = cache.max_cache_size
|
||||
elif cache_type == CacheType.VRAM:
|
||||
value = cache.max_vram_cache_size
|
||||
return value
|
||||
|
||||
|
||||
@model_manager_router.put(
|
||||
"/model_cache",
|
||||
operation_id="set_cache_size",
|
||||
response_model=float,
|
||||
summary="Set maximum size of model manager RAM or VRAM cache, optionally writing new value out to invokeai.yaml config file.",
|
||||
)
|
||||
async def set_cache_size(
|
||||
value: float = Query(description="The new value for the maximum cache size"),
|
||||
cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM),
|
||||
persist: bool = Query(description="Write new value out to invokeai.yaml", default=False),
|
||||
) -> float:
|
||||
"""Set the current RAM or VRAM cache size setting (in GB). ."""
|
||||
cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
|
||||
app_config = get_config()
|
||||
# Record initial state.
|
||||
vram_old = app_config.vram
|
||||
ram_old = app_config.ram
|
||||
|
||||
# Prepare target state.
|
||||
vram_new = vram_old
|
||||
ram_new = ram_old
|
||||
if cache_type == CacheType.RAM:
|
||||
ram_new = value
|
||||
elif cache_type == CacheType.VRAM:
|
||||
vram_new = value
|
||||
else:
|
||||
raise ValueError(f"Unexpected {cache_type=}.")
|
||||
|
||||
config_path = app_config.config_file_path
|
||||
new_config_path = config_path.with_suffix(".yaml.new")
|
||||
|
||||
try:
|
||||
# Try to apply the target state.
|
||||
cache.max_vram_cache_size = vram_new
|
||||
cache.max_cache_size = ram_new
|
||||
app_config.ram = ram_new
|
||||
app_config.vram = vram_new
|
||||
if persist:
|
||||
app_config.write_file(new_config_path)
|
||||
shutil.move(new_config_path, config_path)
|
||||
except Exception as e:
|
||||
# If there was a failure, restore the initial state.
|
||||
cache.max_cache_size = ram_old
|
||||
cache.max_vram_cache_size = vram_old
|
||||
app_config.ram = ram_old
|
||||
app_config.vram = vram_old
|
||||
|
||||
raise RuntimeError("Failed to update cache size") from e
|
||||
return value
|
||||
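set_cache_size applies the new limits first and, when persist is set, writes the updated config to a sibling .yaml.new file that is then moved over the original, rolling everything back if any step fails. A generic, minimal sketch of that write-then-move step (paths and payload here are illustrative):

import shutil
from pathlib import Path


def write_config_atomically(config_path: Path, contents: str) -> None:
    """Write to a temporary sibling file, then move it over the original so the
    config file is never left half-written."""
    tmp_path = config_path.with_suffix(".yaml.new")
    tmp_path.write_text(contents)
    shutil.move(tmp_path, config_path)


# Illustrative usage:
# write_config_atomically(Path("invokeai.yaml"), "ram: 16.0\nvram: 4.0\n")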
|
||||
|
||||
@model_manager_router.get(
|
||||
@@ -856,74 +888,3 @@ async def get_stats() -> Optional[CacheStats]:
|
||||
"""Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded."""
|
||||
|
||||
return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats
|
||||
|
||||
|
||||
@model_manager_router.post(
|
||||
"/empty_model_cache",
|
||||
operation_id="empty_model_cache",
|
||||
status_code=200,
|
||||
)
|
||||
async def empty_model_cache() -> None:
|
||||
"""Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped."""
|
||||
# Request 1000GB of room in order to force the cache to drop all models.
|
||||
ApiDependencies.invoker.services.logger.info("Emptying model cache.")
|
||||
ApiDependencies.invoker.services.model_manager.load.ram_cache.make_room(1000 * 2**30)
|
||||
|
||||
|
||||
class HFTokenStatus(str, Enum):
|
||||
VALID = "valid"
|
||||
INVALID = "invalid"
|
||||
UNKNOWN = "unknown"
|
||||
|
||||
|
||||
class HFTokenHelper:
|
||||
@classmethod
|
||||
def get_status(cls) -> HFTokenStatus:
|
||||
try:
|
||||
if huggingface_hub.get_token_permission(huggingface_hub.get_token()):
|
||||
# Valid token!
|
||||
return HFTokenStatus.VALID
|
||||
# No token set
|
||||
return HFTokenStatus.INVALID
|
||||
except Exception:
|
||||
return HFTokenStatus.UNKNOWN
|
||||
|
||||
@classmethod
|
||||
def set_token(cls, token: str) -> HFTokenStatus:
|
||||
with SuppressOutput(), contextlib.suppress(Exception):
|
||||
huggingface_hub.login(token=token, add_to_git_credential=False)
|
||||
return cls.get_status()
|
||||
|
||||
@classmethod
|
||||
def reset_token(cls) -> HFTokenStatus:
|
||||
with SuppressOutput(), contextlib.suppress(Exception):
|
||||
huggingface_hub.logout()
|
||||
return cls.get_status()
|
||||
|
||||
|
||||
@model_manager_router.get("/hf_login", operation_id="get_hf_login_status", response_model=HFTokenStatus)
|
||||
async def get_hf_login_status() -> HFTokenStatus:
|
||||
token_status = HFTokenHelper.get_status()
|
||||
|
||||
if token_status is HFTokenStatus.UNKNOWN:
|
||||
ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")
|
||||
|
||||
return token_status
|
||||
|
||||
|
||||
@model_manager_router.post("/hf_login", operation_id="do_hf_login", response_model=HFTokenStatus)
|
||||
async def do_hf_login(
|
||||
token: str = Body(description="Hugging Face token to use for login", embed=True),
|
||||
) -> HFTokenStatus:
|
||||
HFTokenHelper.set_token(token)
|
||||
token_status = HFTokenHelper.get_status()
|
||||
|
||||
if token_status is HFTokenStatus.UNKNOWN:
|
||||
ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")
|
||||
|
||||
return token_status
|
||||
|
||||
|
||||
@model_manager_router.delete("/hf_login", operation_id="reset_hf_token", response_model=HFTokenStatus)
|
||||
async def reset_hf_token() -> HFTokenStatus:
|
||||
return HFTokenHelper.reset_token()
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
"""FastAPI route for model relationship records."""
|
||||
|
||||
from typing import List
|
||||
|
||||
from fastapi import APIRouter, Body, HTTPException, Path, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
|
||||
model_relationships_router = APIRouter(prefix="/v1/model_relationships", tags=["model_relationships"])
|
||||
|
||||
# === Schemas ===
|
||||
|
||||
|
||||
class ModelRelationshipCreateRequest(BaseModel):
|
||||
model_key_1: str = Field(
|
||||
...,
|
||||
description="The key of the first model in the relationship",
|
||||
examples=[
|
||||
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
|
||||
"ac32b914-10ab-496e-a24a-3068724b9c35",
|
||||
"d944abfd-c7c3-42e2-a4ff-da640b29b8b4",
|
||||
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
|
||||
"12345678-90ab-cdef-1234-567890abcdef",
|
||||
"fedcba98-7654-3210-fedc-ba9876543210",
|
||||
],
|
||||
)
|
||||
model_key_2: str = Field(
|
||||
...,
|
||||
description="The key of the second model in the relationship",
|
||||
examples=[
|
||||
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
|
||||
"f0c3da4e-d9ff-42b5-a45c-23be75c887c9",
|
||||
"38170dd8-f1e5-431e-866c-2c81f1277fcc",
|
||||
"c57fea2d-7646-424c-b9ad-c0ba60fc68be",
|
||||
"10f7807b-ab54-46a9-ab03-600e88c630a1",
|
||||
"f6c1d267-cf87-4ee0-bee0-37e791eacab7",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class ModelRelationshipBatchRequest(BaseModel):
|
||||
model_keys: List[str] = Field(
|
||||
...,
|
||||
description="List of model keys to fetch related models for",
|
||||
examples=[
|
||||
[
|
||||
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
|
||||
"ac32b914-10ab-496e-a24a-3068724b9c35",
|
||||
],
|
||||
[
|
||||
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
|
||||
"12345678-90ab-cdef-1234-567890abcdef",
|
||||
"fedcba98-7654-3210-fedc-ba9876543210",
|
||||
],
|
||||
[
|
||||
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
|
||||
],
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# === Routes ===
|
||||
|
||||
|
||||
@model_relationships_router.get(
|
||||
"/i/{model_key}",
|
||||
operation_id="get_related_models",
|
||||
response_model=list[str],
|
||||
responses={
|
||||
200: {
|
||||
"description": "A list of related model keys was retrieved successfully",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"example": [
|
||||
"15e9eb28-8cfe-47c9-b610-37907a79fc3c",
|
||||
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
|
||||
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
|
||||
]
|
||||
}
|
||||
},
|
||||
},
|
||||
404: {"description": "The specified model could not be found"},
|
||||
422: {"description": "Validation error"},
|
||||
},
|
||||
)
|
||||
async def get_related_models(
|
||||
model_key: str = Path(..., description="The key of the model to get relationships for"),
|
||||
) -> list[str]:
|
||||
"""
|
||||
Get a list of model keys related to a given model.
|
||||
"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.model_relationships.get_related_model_keys(model_key)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.post(
|
||||
"/",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "The relationship was successfully created"},
|
||||
400: {"description": "Invalid model keys or self-referential relationship"},
|
||||
409: {"description": "The relationship already exists"},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Add Model Relationship",
|
||||
description="Creates a **bidirectional** relationship between two models, allowing each to reference the other as related.",
|
||||
)
|
||||
async def add_model_relationship(
|
||||
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to relate"),
|
||||
) -> None:
|
||||
"""
|
||||
Add a relationship between two models.
|
||||
|
||||
Relationships are bidirectional and will be accessible from both models.
|
||||
|
||||
- Raises 400 if keys are invalid or identical.
|
||||
- Raises 409 if the relationship already exists.
|
||||
"""
|
||||
try:
|
||||
if req.model_key_1 == req.model_key_2:
|
||||
raise HTTPException(status_code=400, detail="Cannot relate a model to itself.")
|
||||
|
||||
ApiDependencies.invoker.services.model_relationships.add_model_relationship(
|
||||
req.model_key_1,
|
||||
req.model_key_2,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.delete(
|
||||
"/",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "The relationship was successfully removed"},
|
||||
400: {"description": "Invalid model keys or self-referential relationship"},
|
||||
404: {"description": "The relationship does not exist"},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Remove Model Relationship",
|
||||
description="Removes a **bidirectional** relationship between two models. The relationship must already exist.",
|
||||
)
|
||||
async def remove_model_relationship(
|
||||
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to disconnect"),
|
||||
) -> None:
|
||||
"""
|
||||
Removes a bidirectional relationship between two model keys.
|
||||
|
||||
- Raises 400 if attempting to unlink a model from itself.
|
||||
- Raises 404 if the relationship was not found.
|
||||
"""
|
||||
try:
|
||||
if req.model_key_1 == req.model_key_2:
|
||||
raise HTTPException(status_code=400, detail="Cannot unlink a model from itself.")
|
||||
|
||||
ApiDependencies.invoker.services.model_relationships.remove_model_relationship(
|
||||
req.model_key_1,
|
||||
req.model_key_2,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.post(
|
||||
"/batch",
|
||||
operation_id="get_related_models_batch",
|
||||
response_model=List[str],
|
||||
responses={
|
||||
200: {
|
||||
"description": "Related model keys retrieved successfully",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"example": [
|
||||
"ca562b14-995e-4a42-90c1-9528f1a5921d",
|
||||
"cc0c2b8a-c62e-41d6-878e-cc74dde5ca8f",
|
||||
"18ca7649-6a9e-47d5-bc17-41ab1e8cec81",
|
||||
"7c12d1b2-0ef9-4bec-ba55-797b2d8f2ee1",
|
||||
"c382eaa3-0e28-4ab0-9446-408667699aeb",
|
||||
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
|
||||
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
|
||||
]
|
||||
}
|
||||
},
|
||||
},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Get Related Model Keys (Batch)",
|
||||
description="Retrieves all **unique related model keys** for a list of given models. This is useful for contextual suggestions or filtering.",
|
||||
)
|
||||
async def get_related_models_batch(
|
||||
req: ModelRelationshipBatchRequest = Body(..., description="Model keys to check for related connections"),
|
||||
) -> list[str]:
|
||||
"""
|
||||
Accepts multiple model keys and returns a flat list of all unique related keys.
|
||||
|
||||
Useful when working with multiple selections in the UI or cross-model comparisons.
|
||||
"""
|
||||
try:
|
||||
all_related: set[str] = set()
|
||||
for key in req.model_keys:
|
||||
related = ApiDependencies.invoker.services.model_relationships.get_related_model_keys(key)
|
||||
all_related.update(related)
|
||||
return list(all_related)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
@@ -1,31 +1,26 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi import Body, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
|
||||
from invokeai.app.services.session_queue.session_queue_common import (
|
||||
QUEUE_ITEM_STATUS,
|
||||
Batch,
|
||||
BatchStatus,
|
||||
CancelAllExceptCurrentResult,
|
||||
CancelByBatchIDsResult,
|
||||
CancelByDestinationResult,
|
||||
ClearResult,
|
||||
DeleteAllExceptCurrentResult,
|
||||
DeleteByDestinationResult,
|
||||
EnqueueBatchResult,
|
||||
FieldIdentifier,
|
||||
ItemIdsResult,
|
||||
PruneResult,
|
||||
RetryItemsResult,
|
||||
SessionQueueCountsByDestination,
|
||||
SessionQueueItem,
|
||||
SessionQueueItemNotFoundError,
|
||||
SessionQueueItemDTO,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
|
||||
session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"])
|
||||
|
||||
@@ -37,12 +32,6 @@ class SessionQueueAndProcessorStatus(BaseModel):
|
||||
processor: SessionProcessorStatus
|
||||
|
||||
|
||||
class ValidationRunData(BaseModel):
|
||||
workflow_id: str = Field(description="The id of the workflow being published.")
|
||||
input_fields: list[FieldIdentifier] = Body(description="The input fields for the published workflow")
|
||||
output_fields: list[FieldIdentifier] = Body(description="The output fields for the published workflow")
|
||||
|
||||
|
||||
@session_queue_router.post(
|
||||
"/{queue_id}/enqueue_batch",
|
||||
operation_id="enqueue_batch",
|
||||
@@ -54,89 +43,31 @@ async def enqueue_batch(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
batch: Batch = Body(description="Batch to process"),
|
||||
prepend: bool = Body(default=False, description="Whether or not to prepend this batch in the queue"),
|
||||
validation_run_data: Optional[ValidationRunData] = Body(
|
||||
default=None,
|
||||
description="The validation run data to use for this batch. This is only used if this is a validation run.",
|
||||
),
|
||||
) -> EnqueueBatchResult:
|
||||
"""Processes a batch and enqueues the output graphs for execution."""
|
||||
try:
|
||||
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
|
||||
queue_id=queue_id, batch=batch, prepend=prepend
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while enqueuing batch: {e}")
|
||||
|
||||
return ApiDependencies.invoker.services.session_queue.enqueue_batch(queue_id=queue_id, batch=batch, prepend=prepend)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
"/{queue_id}/list_all",
|
||||
operation_id="list_all_queue_items",
|
||||
"/{queue_id}/list",
|
||||
operation_id="list_queue_items",
|
||||
responses={
|
||||
200: {"model": list[SessionQueueItem]},
|
||||
200: {"model": CursorPaginatedResults[SessionQueueItemDTO]},
|
||||
},
|
||||
)
|
||||
async def list_all_queue_items(
|
||||
async def list_queue_items(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
|
||||
) -> list[SessionQueueItem]:
|
||||
"""Gets all queue items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.list_all_queue_items(
|
||||
queue_id=queue_id,
|
||||
destination=destination,
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue items: {e}")
|
||||
limit: int = Query(default=50, description="The number of items to fetch"),
|
||||
status: Optional[QUEUE_ITEM_STATUS] = Query(default=None, description="The status of items to fetch"),
|
||||
cursor: Optional[int] = Query(default=None, description="The pagination cursor"),
|
||||
priority: int = Query(default=0, description="The pagination cursor priority"),
|
||||
) -> CursorPaginatedResults[SessionQueueItemDTO]:
|
||||
"""Gets all queue items (without graphs)"""
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
"/{queue_id}/item_ids",
|
||||
operation_id="get_queue_item_ids",
|
||||
responses={
|
||||
200: {"model": ItemIdsResult},
|
||||
},
|
||||
)
|
||||
async def get_queue_item_ids(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
|
||||
) -> ItemIdsResult:
|
||||
"""Gets all queue item ids that match the given parameters"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.get_queue_item_ids(queue_id=queue_id, order_dir=order_dir)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue item ids: {e}")
|
||||
|
||||
|
||||
@session_queue_router.post(
|
||||
"/{queue_id}/items_by_ids",
|
||||
operation_id="get_queue_items_by_item_ids",
|
||||
responses={200: {"model": list[SessionQueueItem]}},
|
||||
)
|
||||
async def get_queue_items_by_item_ids(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
item_ids: list[int] = Body(
|
||||
embed=True, description="Object containing list of queue item ids to fetch queue items for"
|
||||
),
|
||||
) -> list[SessionQueueItem]:
|
||||
"""Gets queue items for the specified queue item ids. Maintains order of item ids."""
|
||||
try:
|
||||
session_queue_service = ApiDependencies.invoker.services.session_queue
|
||||
|
||||
# Fetch queue items preserving the order of requested item ids
|
||||
queue_items: list[SessionQueueItem] = []
|
||||
for item_id in item_ids:
|
||||
try:
|
||||
queue_item = session_queue_service.get_queue_item(item_id=item_id)
|
||||
if queue_item.queue_id != queue_id: # Auth protection for items from other queues
|
||||
continue
|
||||
queue_items.append(queue_item)
|
||||
except Exception:
|
||||
# Skip missing queue items - they may have been deleted between item id fetch and queue item fetch
|
||||
continue
|
||||
|
||||
return queue_items
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to get queue items")
|
||||
return ApiDependencies.invoker.services.session_queue.list_queue_items(
|
||||
queue_id=queue_id, limit=limit, status=status, cursor=cursor, priority=priority
|
||||
)
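
The replacement endpoint above returns cursor-paginated results instead of one unbounded list. A rough client-side paging loop might look like the sketch below; the items/has_more response fields and the use of the last item's item_id as the next cursor are assumptions about CursorPaginatedResults, not something stated in this diff.

import httpx  # assumed HTTP client

BASE_URL = "http://127.0.0.1:9090"  # hypothetical local server address

def iter_queue_items(queue_id: str, limit: int = 50):
    """Yields queue item DTOs page by page, following the cursor until the queue is exhausted."""
    cursor = None
    while True:
        params: dict = {"limit": limit}
        if cursor is not None:
            params["cursor"] = cursor
        page = httpx.get(f"{BASE_URL}/api/v1/queue/{queue_id}/list", params=params).json()
        yield from page["items"]  # assumed field name
        if not page.get("has_more"):  # assumed field name
            break
        cursor = page["items"][-1]["item_id"]  # assumed cursor semantics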
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -148,10 +79,7 @@ async def resume(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> SessionProcessorStatus:
|
||||
"""Resumes session processor"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_processor.resume()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while resuming queue: {e}")
|
||||
return ApiDependencies.invoker.services.session_processor.resume()
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -163,40 +91,7 @@ async def Pause(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> SessionProcessorStatus:
|
||||
"""Pauses session processor"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_processor.pause()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while pausing queue: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
"/{queue_id}/cancel_all_except_current",
|
||||
operation_id="cancel_all_except_current",
|
||||
responses={200: {"model": CancelAllExceptCurrentResult}},
|
||||
)
|
||||
async def cancel_all_except_current(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> CancelAllExceptCurrentResult:
|
||||
"""Immediately cancels all queue items except in-processing items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling all except current: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
"/{queue_id}/delete_all_except_current",
|
||||
operation_id="delete_all_except_current",
|
||||
responses={200: {"model": DeleteAllExceptCurrentResult}},
|
||||
)
|
||||
async def delete_all_except_current(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> DeleteAllExceptCurrentResult:
|
||||
"""Immediately deletes all queue items except in-processing items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.delete_all_except_current(queue_id=queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting all except current: {e}")
|
||||
return ApiDependencies.invoker.services.session_processor.pause()
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -209,46 +104,22 @@ async def cancel_by_batch_ids(
|
||||
batch_ids: list[str] = Body(description="The list of batch_ids to cancel all queue items for", embed=True),
|
||||
) -> CancelByBatchIDsResult:
|
||||
"""Immediately cancels all queue items from the given batch ids"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(
|
||||
queue_id=queue_id, batch_ids=batch_ids
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by batch id: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(queue_id=queue_id, batch_ids=batch_ids)
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
"/{queue_id}/cancel_by_destination",
|
||||
operation_id="cancel_by_destination",
|
||||
responses={200: {"model": CancelByDestinationResult}},
|
||||
responses={200: {"model": CancelByBatchIDsResult}},
|
||||
)
|
||||
async def cancel_by_destination(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
destination: str = Query(description="The destination to cancel all queue items for"),
|
||||
) -> CancelByDestinationResult:
|
||||
"""Immediately cancels all queue items with the given origin"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by destination: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
"/{queue_id}/retry_items_by_id",
|
||||
operation_id="retry_items_by_id",
|
||||
responses={200: {"model": RetryItemsResult}},
|
||||
)
|
||||
async def retry_items_by_id(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
item_ids: list[int] = Body(description="The queue item ids to retry"),
|
||||
) -> RetryItemsResult:
|
||||
"""Immediately cancels all queue items with the given origin"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while retrying queue items: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -262,14 +133,11 @@ async def clear(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> ClearResult:
|
||||
"""Clears the queue entirely, immediately canceling the currently-executing session"""
|
||||
try:
|
||||
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
if queue_item is not None:
|
||||
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
|
||||
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
|
||||
return clear_result
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while clearing queue: {e}")
|
||||
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
if queue_item is not None:
|
||||
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
|
||||
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
|
||||
return clear_result
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -283,10 +151,7 @@ async def prune(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> PruneResult:
|
||||
"""Prunes all completed or errored queue items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.prune(queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while pruning queue: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.prune(queue_id)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -300,10 +165,7 @@ async def get_current_queue_item(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> Optional[SessionQueueItem]:
|
||||
"""Gets the currently execution queue item"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while getting current queue item: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -317,10 +179,7 @@ async def get_next_queue_item(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> Optional[SessionQueueItem]:
|
||||
"""Gets the next queue item, without executing it"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while getting next queue item: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -334,12 +193,9 @@ async def get_queue_status(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> SessionQueueAndProcessorStatus:
|
||||
"""Gets the status of the session queue"""
|
||||
try:
|
||||
queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
|
||||
processor = ApiDependencies.invoker.services.session_processor.get_status()
|
||||
return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while getting queue status: {e}")
|
||||
queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
|
||||
processor = ApiDependencies.invoker.services.session_processor.get_status()
|
||||
return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -354,10 +210,7 @@ async def get_batch_status(
|
||||
batch_id: str = Path(description="The batch to get the status of"),
|
||||
) -> BatchStatus:
|
||||
"""Gets the status of the session queue"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while getting batch status: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -373,30 +226,7 @@ async def get_queue_item(
|
||||
item_id: int = Path(description="The queue item to get"),
|
||||
) -> SessionQueueItem:
|
||||
"""Gets a queue item"""
|
||||
try:
|
||||
queue_item = ApiDependencies.invoker.services.session_queue.get_queue_item(item_id=item_id)
|
||||
if queue_item.queue_id != queue_id:
|
||||
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
|
||||
return queue_item
|
||||
except SessionQueueItemNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while fetching queue item: {e}")
|
||||
|
||||
|
||||
@session_queue_router.delete(
|
||||
"/{queue_id}/i/{item_id}",
|
||||
operation_id="delete_queue_item",
|
||||
)
|
||||
async def delete_queue_item(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
item_id: int = Path(description="The queue item to delete"),
|
||||
) -> None:
|
||||
"""Deletes a queue item"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.session_queue.delete_queue_item(item_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting queue item: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -411,12 +241,8 @@ async def cancel_queue_item(
|
||||
item_id: int = Path(description="The queue item to cancel"),
|
||||
) -> SessionQueueItem:
|
||||
"""Deletes a queue item"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
|
||||
except SessionQueueItemNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling queue item: {e}")
|
||||
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
@@ -429,27 +255,6 @@ async def counts_by_destination(
|
||||
destination: str = Query(description="The destination to query"),
|
||||
) -> SessionQueueCountsByDestination:
|
||||
"""Gets the counts of queue items by destination"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while fetching counts by destination: {e}")
|
||||
|
||||
|
||||
@session_queue_router.delete(
|
||||
"/{queue_id}/d/{destination}",
|
||||
operation_id="delete_by_destination",
|
||||
responses={200: {"model": DeleteByDestinationResult}},
|
||||
)
|
||||
async def delete_by_destination(
|
||||
queue_id: str = Path(description="The queue id to query"),
|
||||
destination: str = Path(description="The destination to query"),
|
||||
) -> DeleteByDestinationResult:
|
||||
"""Deletes all items with the given destination"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.delete_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting by destination: {e}")
|
||||
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
|
||||
@@ -25,7 +25,6 @@ async def parse_dynamicprompts(
    prompt: str = Body(description="The prompt to parse with dynamicprompts"),
    max_prompts: int = Body(ge=1, le=10000, default=1000, description="The max number of prompts to generate"),
    combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
    seed: int | None = Body(None, description="The seed to use for random generation. Only used if not combinatorial"),
) -> DynamicPromptsResponse:
    """Creates a batch process"""
    max_prompts = min(max_prompts, 10000)
@@ -36,7 +35,7 @@ async def parse_dynamicprompts(
            generator = CombinatorialPromptGenerator()
            prompts = generator.generate(prompt, max_prompts=max_prompts)
        else:
            generator = RandomPromptGenerator(seed=seed)
            generator = RandomPromptGenerator()
            prompts = generator.generate(prompt, num_images=max_prompts)
    except ParseException as e:
        prompts = [prompt]
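
For context, the two generators behave quite differently on the same template. A small standalone sketch (import path assumed from the dynamicprompts package; the call signatures mirror the handler above):

from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator

template = "a {red|blue|green} ball"

# Combinatorial: expands every alternative, capped by max_prompts
# (e.g. "a red ball", "a blue ball", "a green ball").
combinatorial_prompts = CombinatorialPromptGenerator().generate(template, max_prompts=10)

# Random: samples num_images prompts from the template, possibly with repeats.
random_prompts = RandomPromptGenerator().generate(template, num_images=3)
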
@@ -1,119 +0,0 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
from invokeai.app.services.videos_common import (
|
||||
DeleteVideosResult,
|
||||
StarredVideosResult,
|
||||
UnstarredVideosResult,
|
||||
VideoDTO,
|
||||
VideoIdsResult,
|
||||
VideoRecordChanges,
|
||||
)
|
||||
|
||||
videos_router = APIRouter(prefix="/v1/videos", tags=["videos"])
|
||||
|
||||
|
||||
@videos_router.patch(
|
||||
"/i/{video_id}",
|
||||
operation_id="update_video",
|
||||
response_model=VideoDTO,
|
||||
)
|
||||
async def update_video(
|
||||
video_id: str = Path(description="The id of the video to update"),
|
||||
video_changes: VideoRecordChanges = Body(description="The changes to apply to the video"),
|
||||
) -> VideoDTO:
|
||||
"""Updates a video"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.get(
|
||||
"/i/{video_id}",
|
||||
operation_id="get_video_dto",
|
||||
response_model=VideoDTO,
|
||||
)
|
||||
async def get_video_dto(
|
||||
video_id: str = Path(description="The id of the video to get"),
|
||||
) -> VideoDTO:
|
||||
"""Gets a video's DTO"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.post("/delete", operation_id="delete_videos_from_list", response_model=DeleteVideosResult)
|
||||
async def delete_videos_from_list(
|
||||
video_ids: list[str] = Body(description="The list of ids of videos to delete", embed=True),
|
||||
) -> DeleteVideosResult:
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.post("/star", operation_id="star_videos_in_list", response_model=StarredVideosResult)
|
||||
async def star_videos_in_list(
|
||||
video_ids: list[str] = Body(description="The list of ids of videos to star", embed=True),
|
||||
) -> StarredVideosResult:
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.post("/unstar", operation_id="unstar_videos_in_list", response_model=UnstarredVideosResult)
|
||||
async def unstar_videos_in_list(
|
||||
video_ids: list[str] = Body(description="The list of ids of videos to unstar", embed=True),
|
||||
) -> UnstarredVideosResult:
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.delete("/uncategorized", operation_id="delete_uncategorized_videos", response_model=DeleteVideosResult)
|
||||
async def delete_uncategorized_videos() -> DeleteVideosResult:
|
||||
"""Deletes all videos that are uncategorized"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.get("/", operation_id="list_video_dtos", response_model=OffsetPaginatedResults[VideoDTO])
|
||||
async def list_video_dtos(
|
||||
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None,
|
||||
description="The board id to filter by. Use 'none' to find videos without a board.",
|
||||
),
|
||||
offset: int = Query(default=0, description="The page offset"),
|
||||
limit: int = Query(default=10, description="The number of videos per page"),
|
||||
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
|
||||
starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
|
||||
search_term: Optional[str] = Query(default=None, description="The term to search for"),
|
||||
) -> OffsetPaginatedResults[VideoDTO]:
|
||||
"""Lists video DTOs"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.get("/ids", operation_id="get_video_ids")
|
||||
async def get_video_ids(
|
||||
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None,
|
||||
description="The board id to filter by. Use 'none' to find videos without a board.",
|
||||
),
|
||||
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
|
||||
starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
|
||||
search_term: Optional[str] = Query(default=None, description="The term to search for"),
|
||||
) -> VideoIdsResult:
|
||||
"""Gets ordered list of video ids with metadata for optimistic updates"""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
|
||||
|
||||
@videos_router.post(
|
||||
"/videos_by_ids",
|
||||
operation_id="get_videos_by_ids",
|
||||
responses={200: {"model": list[VideoDTO]}},
|
||||
)
|
||||
async def get_videos_by_ids(
|
||||
video_ids: list[str] = Body(embed=True, description="Object containing list of video ids to fetch DTOs for"),
|
||||
) -> list[VideoDTO]:
|
||||
"""Gets video DTOs for the specified video ids. Maintains order of input ids."""
|
||||
|
||||
raise HTTPException(status_code=501, detail="Not implemented")
|
||||
@@ -1,10 +1,6 @@
|
||||
import io
|
||||
import traceback
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, Body, File, HTTPException, Path, Query, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from PIL import Image
|
||||
from fastapi import APIRouter, Body, HTTPException, Path, Query
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.shared.pagination import PaginatedResults
|
||||
@@ -14,14 +10,11 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
|
||||
WorkflowCategory,
|
||||
WorkflowNotFoundError,
|
||||
WorkflowRecordDTO,
|
||||
WorkflowRecordListItemWithThumbnailDTO,
|
||||
WorkflowRecordListItemDTO,
|
||||
WorkflowRecordOrderBy,
|
||||
WorkflowRecordWithThumbnailDTO,
|
||||
WorkflowWithoutID,
|
||||
)
|
||||
from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_common import WorkflowThumbnailFileNotFoundException
|
||||
|
||||
IMAGE_MAX_AGE = 31536000
|
||||
workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
|
||||
|
||||
|
||||
@@ -29,17 +22,15 @@ workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
|
||||
"/i/{workflow_id}",
|
||||
operation_id="get_workflow",
|
||||
responses={
|
||||
200: {"model": WorkflowRecordWithThumbnailDTO},
|
||||
200: {"model": WorkflowRecordDTO},
|
||||
},
|
||||
)
|
||||
async def get_workflow(
|
||||
workflow_id: str = Path(description="The workflow to get"),
|
||||
) -> WorkflowRecordWithThumbnailDTO:
|
||||
) -> WorkflowRecordDTO:
|
||||
"""Gets a workflow"""
|
||||
try:
|
||||
thumbnail_url = ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow_id)
|
||||
workflow = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
|
||||
return WorkflowRecordWithThumbnailDTO(thumbnail_url=thumbnail_url, **workflow.model_dump())
|
||||
return ApiDependencies.invoker.services.workflow_records.get(workflow_id)
|
||||
except WorkflowNotFoundError:
|
||||
raise HTTPException(status_code=404, detail="Workflow not found")
|
||||
|
||||
@@ -66,11 +57,6 @@ async def delete_workflow(
|
||||
workflow_id: str = Path(description="The workflow to delete"),
|
||||
) -> None:
|
||||
"""Deletes a workflow"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
|
||||
except WorkflowThumbnailFileNotFoundException:
|
||||
# It's OK if the workflow has no thumbnail file. We can still delete the workflow.
|
||||
pass
|
||||
ApiDependencies.invoker.services.workflow_records.delete(workflow_id)
|
||||
|
||||
|
||||
@@ -92,170 +78,20 @@ async def create_workflow(
|
||||
"/",
|
||||
operation_id="list_workflows",
|
||||
responses={
|
||||
200: {"model": PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]},
|
||||
200: {"model": PaginatedResults[WorkflowRecordListItemDTO]},
|
||||
},
|
||||
)
|
||||
async def list_workflows(
|
||||
page: int = Query(default=0, description="The page to get"),
|
||||
per_page: Optional[int] = Query(default=None, description="The number of workflows per page"),
|
||||
per_page: int = Query(default=10, description="The number of workflows per page"),
|
||||
order_by: WorkflowRecordOrderBy = Query(
|
||||
default=WorkflowRecordOrderBy.Name, description="The attribute to order by"
|
||||
),
|
||||
direction: SQLiteDirection = Query(default=SQLiteDirection.Ascending, description="The direction to order by"),
|
||||
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories of workflow to get"),
|
||||
tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
|
||||
category: WorkflowCategory = Query(default=WorkflowCategory.User, description="The category of workflow to get"),
|
||||
query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
|
||||
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
|
||||
is_published: Optional[bool] = Query(default=None, description="Whether to include/exclude published workflows"),
|
||||
) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
|
||||
) -> PaginatedResults[WorkflowRecordListItemDTO]:
|
||||
"""Gets a page of workflows"""
|
||||
workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
|
||||
workflows = ApiDependencies.invoker.services.workflow_records.get_many(
|
||||
order_by=order_by,
|
||||
direction=direction,
|
||||
page=page,
|
||||
per_page=per_page,
|
||||
query=query,
|
||||
categories=categories,
|
||||
tags=tags,
|
||||
has_been_opened=has_been_opened,
|
||||
is_published=is_published,
|
||||
return ApiDependencies.invoker.services.workflow_records.get_many(
|
||||
page=page, per_page=per_page, order_by=order_by, direction=direction, query=query, category=category
|
||||
)
|
||||
for workflow in workflows.items:
|
||||
workflows_with_thumbnails.append(
|
||||
WorkflowRecordListItemWithThumbnailDTO(
|
||||
thumbnail_url=ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow.workflow_id),
|
||||
**workflow.model_dump(),
|
||||
)
|
||||
)
|
||||
return PaginatedResults[WorkflowRecordListItemWithThumbnailDTO](
|
||||
items=workflows_with_thumbnails,
|
||||
total=workflows.total,
|
||||
page=workflows.page,
|
||||
pages=workflows.pages,
|
||||
per_page=workflows.per_page,
|
||||
)
|
||||
|
||||
|
||||
@workflows_router.put(
|
||||
"/i/{workflow_id}/thumbnail",
|
||||
operation_id="set_workflow_thumbnail",
|
||||
responses={
|
||||
200: {"model": WorkflowRecordDTO},
|
||||
},
|
||||
)
|
||||
async def set_workflow_thumbnail(
|
||||
workflow_id: str = Path(description="The workflow to update"),
|
||||
image: UploadFile = File(description="The image file to upload"),
|
||||
):
|
||||
"""Sets a workflow's thumbnail image"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.workflow_records.get(workflow_id)
|
||||
except WorkflowNotFoundError:
|
||||
raise HTTPException(status_code=404, detail="Workflow not found")
|
||||
|
||||
if not image.content_type or not image.content_type.startswith("image"):
|
||||
raise HTTPException(status_code=415, detail="Not an image")
|
||||
|
||||
contents = await image.read()
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
|
||||
except Exception:
|
||||
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
|
||||
try:
|
||||
ApiDependencies.invoker.services.workflow_thumbnails.save(workflow_id, pil_image)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@workflows_router.delete(
|
||||
"/i/{workflow_id}/thumbnail",
|
||||
operation_id="delete_workflow_thumbnail",
|
||||
responses={
|
||||
200: {"model": WorkflowRecordDTO},
|
||||
},
|
||||
)
|
||||
async def delete_workflow_thumbnail(
|
||||
workflow_id: str = Path(description="The workflow to update"),
|
||||
):
|
||||
"""Removes a workflow's thumbnail image"""
|
||||
try:
|
||||
ApiDependencies.invoker.services.workflow_records.get(workflow_id)
|
||||
except WorkflowNotFoundError:
|
||||
raise HTTPException(status_code=404, detail="Workflow not found")
|
||||
|
||||
try:
|
||||
ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@workflows_router.get(
|
||||
"/i/{workflow_id}/thumbnail",
|
||||
operation_id="get_workflow_thumbnail",
|
||||
responses={
|
||||
200: {
|
||||
"description": "The workflow thumbnail was fetched successfully",
|
||||
},
|
||||
400: {"description": "Bad request"},
|
||||
404: {"description": "The workflow thumbnail could not be found"},
|
||||
},
|
||||
status_code=200,
|
||||
)
|
||||
async def get_workflow_thumbnail(
|
||||
workflow_id: str = Path(description="The id of the workflow thumbnail to get"),
|
||||
) -> FileResponse:
|
||||
"""Gets a workflow's thumbnail image"""
|
||||
|
||||
try:
|
||||
path = ApiDependencies.invoker.services.workflow_thumbnails.get_path(workflow_id)
|
||||
|
||||
response = FileResponse(
|
||||
path,
|
||||
media_type="image/png",
|
||||
filename=workflow_id + ".png",
|
||||
content_disposition_type="inline",
|
||||
)
|
||||
response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
|
||||
return response
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
@workflows_router.get("/counts_by_tag", operation_id="get_counts_by_tag")
|
||||
async def get_counts_by_tag(
|
||||
tags: list[str] = Query(description="The tags to get counts for"),
|
||||
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
|
||||
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
|
||||
) -> dict[str, int]:
|
||||
"""Counts workflows by tag"""
|
||||
|
||||
return ApiDependencies.invoker.services.workflow_records.counts_by_tag(
|
||||
tags=tags, categories=categories, has_been_opened=has_been_opened
|
||||
)
|
||||
|
||||
|
||||
@workflows_router.get("/counts_by_category", operation_id="counts_by_category")
|
||||
async def counts_by_category(
|
||||
categories: list[WorkflowCategory] = Query(description="The categories to include"),
|
||||
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
|
||||
) -> dict[str, int]:
|
||||
"""Counts workflows by category"""
|
||||
|
||||
return ApiDependencies.invoker.services.workflow_records.counts_by_category(
|
||||
categories=categories, has_been_opened=has_been_opened
|
||||
)
|
||||
|
||||
|
||||
@workflows_router.put(
|
||||
"/i/{workflow_id}/opened_at",
|
||||
operation_id="update_opened_at",
|
||||
)
|
||||
async def update_opened_at(
|
||||
workflow_id: str = Path(description="The workflow to update"),
|
||||
) -> None:
|
||||
"""Updates the opened_at field of a workflow"""
|
||||
ApiDependencies.invoker.services.workflow_records.update_opened_at(workflow_id)
|
||||
|
||||
@@ -1,8 +1,12 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import mimetypes
|
||||
import socket
|
||||
from contextlib import asynccontextmanager
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import uvicorn
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.middleware.gzip import GZipMiddleware
|
||||
@@ -11,33 +15,47 @@ from fastapi.responses import HTMLResponse, RedirectResponse
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
|
||||
from torch.backends.mps import is_available as is_mps_available
|
||||
|
||||
# for PyCharm:
|
||||
# noinspection PyUnresolvedReferences
|
||||
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
|
||||
import invokeai.frontend.web as web_dir
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
|
||||
from invokeai.app.api.routers import (
|
||||
app_info,
|
||||
board_images,
|
||||
board_videos,
|
||||
boards,
|
||||
client_state,
|
||||
download_queue,
|
||||
images,
|
||||
model_manager,
|
||||
model_relationships,
|
||||
session_queue,
|
||||
style_presets,
|
||||
utilities,
|
||||
videos,
|
||||
workflows,
|
||||
)
|
||||
from invokeai.app.api.sockets import SocketIO
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.custom_openapi import get_openapi_func
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
app_config = get_config()
|
||||
|
||||
|
||||
if is_mps_available():
|
||||
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
|
||||
|
||||
|
||||
logger = InvokeAILogger.get_logger(config=app_config)
|
||||
# fix for windows mimetypes registry entries being borked
|
||||
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
|
||||
mimetypes.add_type("application/javascript", ".js")
|
||||
mimetypes.add_type("text/css", ".css")
|
||||
|
||||
torch_device_name = TorchDevice.get_torch_device_name()
|
||||
logger.info(f"Using torch device: {torch_device_name}")
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
@@ -46,23 +64,6 @@ loop = asyncio.new_event_loop()
|
||||
async def lifespan(app: FastAPI):
|
||||
# Add startup event to load dependencies
|
||||
ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, loop=loop, logger=logger)
|
||||
|
||||
# Log the server address when it starts - in case the network log level is not high enough to see the startup log
|
||||
proto = "https" if app_config.ssl_certfile else "http"
|
||||
msg = f"Invoke running on {proto}://{app_config.host}:{app_config.port} (Press CTRL+C to quit)"
|
||||
|
||||
# Logging this way ignores the logger's log level and _always_ logs the message
|
||||
record = logger.makeRecord(
|
||||
name=logger.name,
|
||||
level=logging.INFO,
|
||||
fn="",
|
||||
lno=0,
|
||||
msg=msg,
|
||||
args=(),
|
||||
exc_info=None,
|
||||
)
|
||||
logger.handle(record)
|
||||
|
||||
yield
|
||||
# Shut down threads
|
||||
ApiDependencies.shutdown()
|
||||
@@ -127,16 +128,12 @@ app.include_router(utilities.utilities_router, prefix="/api")
|
||||
app.include_router(model_manager.model_manager_router, prefix="/api")
|
||||
app.include_router(download_queue.download_queue_router, prefix="/api")
|
||||
app.include_router(images.images_router, prefix="/api")
|
||||
app.include_router(videos.videos_router, prefix="/api")
|
||||
app.include_router(boards.boards_router, prefix="/api")
|
||||
app.include_router(board_images.board_images_router, prefix="/api")
|
||||
app.include_router(board_videos.board_videos_router, prefix="/api")
|
||||
app.include_router(model_relationships.model_relationships_router, prefix="/api")
|
||||
app.include_router(app_info.app_router, prefix="/api")
|
||||
app.include_router(session_queue.session_queue_router, prefix="/api")
|
||||
app.include_router(workflows.workflows_router, prefix="/api")
|
||||
app.include_router(style_presets.style_presets_router, prefix="/api")
|
||||
app.include_router(client_state.client_state_router, prefix="/api")
|
||||
|
||||
app.openapi = get_openapi_func(app)
|
||||
|
||||
@@ -161,16 +158,80 @@ def overridden_redoc() -> HTMLResponse:
|
||||
|
||||
web_root_path = Path(list(web_dir.__path__)[0])
|
||||
|
||||
if app_config.unsafe_disable_picklescan:
|
||||
logger.warning(
|
||||
"The unsafe_disable_picklescan option is enabled. This disables malware scanning while installing and"
|
||||
"loading models, which may allow malicious code to be executed. Use at your own risk."
|
||||
)
|
||||
|
||||
try:
|
||||
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||
except RuntimeError:
|
||||
logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
app.mount(
|
||||
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
|
||||
) # docs favicon is in here
|
||||
|
||||
|
||||
def check_cudnn(logger: logging.Logger) -> None:
|
||||
"""Check for cuDNN issues that could be causing degraded performance."""
|
||||
if torch.backends.cudnn.is_available():
|
||||
try:
|
||||
# Note: At the time of writing (torch 2.2.1), torch.backends.cudnn.version() only raises an error the first
|
||||
# time it is called. Subsequent calls will return the version number without complaining about a mismatch.
|
||||
cudnn_version = torch.backends.cudnn.version()
|
||||
logger.info(f"cuDNN version: {cudnn_version}")
|
||||
except RuntimeError as e:
|
||||
logger.warning(
|
||||
"Encountered a cuDNN version issue. This may result in degraded performance. This issue is usually "
|
||||
"caused by an incompatible cuDNN version installed in your python environment, or on the host "
|
||||
f"system. Full error message:\n{e}"
|
||||
)
|
||||
|
||||
|
||||
def invoke_api() -> None:
    def find_port(port: int) -> int:
        """Find a port not in use starting at given port"""
        # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
        # https://github.com/WaylonWalker
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1)
            if s.connect_ex(("localhost", port)) == 0:
                return find_port(port=port + 1)
            else:
                return port
|
||||
if app_config.dev_reload:
|
||||
try:
|
||||
import jurigged
|
||||
except ImportError as e:
|
||||
logger.error(
|
||||
'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.',
|
||||
exc_info=e,
|
||||
)
|
||||
else:
|
||||
jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)
|
||||
|
||||
port = find_port(app_config.port)
|
||||
if port != app_config.port:
|
||||
logger.warn(f"Port {app_config.port} in use, using port {port}")
|
||||
|
||||
check_cudnn(logger)
|
||||
|
||||
config = uvicorn.Config(
|
||||
app=app,
|
||||
host=app_config.host,
|
||||
port=port,
|
||||
loop="asyncio",
|
||||
log_level=app_config.log_level,
|
||||
ssl_certfile=app_config.ssl_certfile,
|
||||
ssl_keyfile=app_config.ssl_keyfile,
|
||||
)
|
||||
server = uvicorn.Server(config)
|
||||
|
||||
# replace uvicorn's loggers with InvokeAI's for consistent appearance
|
||||
for logname in ["uvicorn.access", "uvicorn"]:
|
||||
log = InvokeAILogger.get_logger(logname)
|
||||
log.handlers.clear()
|
||||
for ch in logger.handlers:
|
||||
log.addHandler(ch)
|
||||
|
||||
loop.run_until_complete(server.serve())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
invoke_api()
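
The find_port helper above probes a port by attempting a TCP connect and walks upward until the connect fails. A quick way to exercise the same probe outside the server (a minimal sketch; the port number is only an example):

import socket

def is_port_in_use(port: int, host: str = "localhost") -> bool:
    """True if something is already accepting connections on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1)
        return s.connect_ex((host, port)) == 0

print(is_port_in_use(9090))  # e.g. True while an Invoke server is listening on port 9090
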
@@ -1,5 +1,28 @@
import shutil
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

from invokeai.app.services.config.config_default import get_config

custom_nodes_path = Path(get_config().custom_nodes_path)
custom_nodes_path.mkdir(parents=True, exist_ok=True)

custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")

# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)

# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
    raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)

# add core nodes to __all__
python_files = filter(lambda f: not f.name.startswith("_"), Path(__file__).parent.glob("*.py"))
__all__ = [f.stem for f in python_files]  # type: ignore
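
A standalone sketch of the programmatic-import pattern used above, with a throwaway module instead of the custom nodes package (the file path is hypothetical):

import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

plugin_path = Path("/tmp/my_plugin.py")  # hypothetical path for illustration
plugin_path.write_text("VALUE = 42\n")

spec = spec_from_file_location("my_plugin", plugin_path)
assert spec is not None and spec.loader is not None
module = module_from_spec(spec)
sys.modules[spec.name] = module  # register before executing, as the loader above does
spec.loader.exec_module(module)
print(module.VALUE)  # 42
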
@@ -4,13 +4,9 @@ from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import re
|
||||
import sys
|
||||
import types
|
||||
import typing
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from functools import lru_cache
|
||||
from inspect import signature
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -22,23 +18,19 @@ from typing import (
|
||||
Literal,
|
||||
Optional,
|
||||
Type,
|
||||
TypedDict,
|
||||
TypeVar,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
import semver
|
||||
from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, create_model
|
||||
from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model
|
||||
from pydantic.fields import FieldInfo
|
||||
from pydantic_core import PydanticUndefined
|
||||
from typing_extensions import TypeAliasType
|
||||
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldKind,
|
||||
Input,
|
||||
InputFieldJSONSchemaExtra,
|
||||
UIType,
|
||||
migrate_model_ui_type,
|
||||
)
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
@@ -51,6 +43,8 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = InvokeAILogger.get_logger()
|
||||
|
||||
CUSTOM_NODE_PACK_SUFFIX = "__invokeai-custom-node"
|
||||
|
||||
|
||||
class InvalidVersionError(ValueError):
|
||||
pass
|
||||
@@ -68,7 +62,6 @@ class Classification(str, Enum, metaclass=MetaEnum):
|
||||
- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
|
||||
- `Deprecated`: The invocation is deprecated and may be removed in a future version.
|
||||
- `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
|
||||
- `Special`: The invocation is a special case and does not fit into any of the other classifications.
|
||||
"""
|
||||
|
||||
Stable = "stable"
|
||||
@@ -76,18 +69,6 @@ class Classification(str, Enum, metaclass=MetaEnum):
|
||||
Prototype = "prototype"
|
||||
Deprecated = "deprecated"
|
||||
Internal = "internal"
|
||||
Special = "special"
|
||||
|
||||
|
||||
class Bottleneck(str, Enum, metaclass=MetaEnum):
|
||||
"""
|
||||
The bottleneck of an invocation.
|
||||
- `Network`: The invocation's execution is network-bound.
|
||||
- `GPU`: The invocation's execution is GPU-bound.
|
||||
"""
|
||||
|
||||
Network = "network"
|
||||
GPU = "gpu"
|
||||
|
||||
|
||||
class UIConfigBase(BaseModel):
|
||||
@@ -96,7 +77,7 @@ class UIConfigBase(BaseModel):
|
||||
This is used internally by the @invocation decorator logic. Do not use this directly.
|
||||
"""
|
||||
|
||||
tags: Optional[list[str]] = Field(default=None, description="The node's tags")
|
||||
tags: Optional[list[str]] = Field(default_factory=None, description="The node's tags")
|
||||
title: Optional[str] = Field(default=None, description="The node's display name")
|
||||
category: Optional[str] = Field(default=None, description="The node's category")
|
||||
version: str = Field(
|
||||
@@ -111,11 +92,6 @@ class UIConfigBase(BaseModel):
|
||||
)
|
||||
|
||||
|
||||
class OriginalModelField(TypedDict):
|
||||
annotation: Any
|
||||
field_info: FieldInfo
|
||||
|
||||
|
||||
class BaseInvocationOutput(BaseModel):
|
||||
"""
|
||||
Base class for all invocation outputs.
|
||||
@@ -123,11 +99,36 @@ class BaseInvocationOutput(BaseModel):
|
||||
All invocation outputs must use the `@invocation_output` decorator to provide their unique type.
|
||||
"""
|
||||
|
||||
output_meta: Optional[dict[str, JsonValue]] = Field(
|
||||
default=None,
|
||||
description="Optional dictionary of metadata for the invocation output, unrelated to the invocation's actual output value. This is not exposed as an output field.",
|
||||
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
|
||||
)
|
||||
_output_classes: ClassVar[set[BaseInvocationOutput]] = set()
|
||||
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
|
||||
_typeadapter_needs_update: ClassVar[bool] = False
|
||||
|
||||
@classmethod
|
||||
def register_output(cls, output: BaseInvocationOutput) -> None:
|
||||
"""Registers an invocation output."""
|
||||
cls._output_classes.add(output)
|
||||
cls._typeadapter_needs_update = True
|
||||
|
||||
@classmethod
|
||||
def get_outputs(cls) -> Iterable[BaseInvocationOutput]:
|
||||
"""Gets all invocation outputs."""
|
||||
return cls._output_classes
|
||||
|
||||
@classmethod
|
||||
def get_typeadapter(cls) -> TypeAdapter[Any]:
|
||||
"""Gets a pydantc TypeAdapter for the union of all invocation output types."""
|
||||
if not cls._typeadapter or cls._typeadapter_needs_update:
|
||||
AnyInvocationOutput = TypeAliasType(
|
||||
"AnyInvocationOutput", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
|
||||
)
|
||||
cls._typeadapter = TypeAdapter(AnyInvocationOutput)
|
||||
cls._typeadapter_needs_update = False
|
||||
return cls._typeadapter
|
||||
|
||||
@classmethod
|
||||
def get_output_types(cls) -> Iterable[str]:
|
||||
"""Gets all invocation output types."""
|
||||
return (i.get_type() for i in BaseInvocationOutput.get_outputs())
|
||||
|
||||
@staticmethod
|
||||
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocationOutput]) -> None:
|
||||
@@ -144,9 +145,6 @@ class BaseInvocationOutput(BaseModel):
|
||||
"""Gets the invocation output's type, as provided by the `@invocation_output` decorator."""
|
||||
return cls.model_fields["type"].default
|
||||
|
||||
_original_model_fields: ClassVar[dict[str, OriginalModelField]] = {}
|
||||
"""The original model fields, before any modifications were made by the @invocation_output decorator."""
|
||||
|
||||
model_config = ConfigDict(
|
||||
protected_namespaces=(),
|
||||
validate_assignment=True,
|
||||
@@ -174,13 +172,61 @@ class BaseInvocation(ABC, BaseModel):
|
||||
All invocations must use the `@invocation` decorator to provide their unique type.
|
||||
"""
|
||||
|
||||
_invocation_classes: ClassVar[set[BaseInvocation]] = set()
|
||||
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
|
||||
_typeadapter_needs_update: ClassVar[bool] = False
|
||||
|
||||
@classmethod
|
||||
def get_type(cls) -> str:
|
||||
"""Gets the invocation's type, as provided by the `@invocation` decorator."""
|
||||
return cls.model_fields["type"].default
|
||||
|
||||
@classmethod
|
||||
def get_output_annotation(cls) -> Type[BaseInvocationOutput]:
|
||||
def register_invocation(cls, invocation: BaseInvocation) -> None:
|
||||
"""Registers an invocation."""
|
||||
cls._invocation_classes.add(invocation)
|
||||
cls._typeadapter_needs_update = True
|
||||
|
||||
@classmethod
|
||||
def get_typeadapter(cls) -> TypeAdapter[Any]:
|
||||
"""Gets a pydantc TypeAdapter for the union of all invocation types."""
|
||||
if not cls._typeadapter or cls._typeadapter_needs_update:
|
||||
AnyInvocation = TypeAliasType(
|
||||
"AnyInvocation", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
|
||||
)
|
||||
cls._typeadapter = TypeAdapter(AnyInvocation)
|
||||
cls._typeadapter_needs_update = False
|
||||
return cls._typeadapter
|
||||
|
||||
@classmethod
|
||||
def get_invocations(cls) -> Iterable[BaseInvocation]:
|
||||
"""Gets all invocations, respecting the allowlist and denylist."""
|
||||
app_config = get_config()
|
||||
allowed_invocations: set[BaseInvocation] = set()
|
||||
for sc in cls._invocation_classes:
|
||||
invocation_type = sc.get_type()
|
||||
is_in_allowlist = (
|
||||
invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True
|
||||
)
|
||||
is_in_denylist = (
|
||||
invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False
|
||||
)
|
||||
if is_in_allowlist and not is_in_denylist:
|
||||
allowed_invocations.add(sc)
|
||||
return allowed_invocations
|
||||
|
||||
@classmethod
|
||||
def get_invocations_map(cls) -> dict[str, BaseInvocation]:
|
||||
"""Gets a map of all invocation types to their invocation classes."""
|
||||
return {i.get_type(): i for i in BaseInvocation.get_invocations()}
|
||||
|
||||
@classmethod
|
||||
def get_invocation_types(cls) -> Iterable[str]:
|
||||
"""Gets all invocation types."""
|
||||
return (i.get_type() for i in BaseInvocation.get_invocations())
|
||||
|
||||
@classmethod
|
||||
def get_output_annotation(cls) -> BaseInvocationOutput:
|
||||
"""Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
|
||||
return signature(cls.invoke).return_annotation
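
The get_typeadapter classmethods above build a single pydantic TypeAdapter over the discriminated union of every registered class, keyed on the "type" field. A minimal, self-contained sketch of that pattern with hypothetical models (not InvokeAI's real invocation classes):

from typing import Annotated, Literal, Union
from pydantic import BaseModel, Field, TypeAdapter

class AddInvocation(BaseModel):
    type: Literal["add"] = "add"
    a: int
    b: int

class NoopInvocation(BaseModel):
    type: Literal["noop"] = "noop"

AnyInvocation = Annotated[Union[AddInvocation, NoopInvocation], Field(discriminator="type")]
adapter = TypeAdapter(AnyInvocation)

# The "type" value selects the concrete class during validation.
node = adapter.validate_python({"type": "add", "a": 1, "b": 2})
assert isinstance(node, AddInvocation)
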
|
||||
|
||||
@@ -212,7 +258,7 @@ class BaseInvocation(ABC, BaseModel):
|
||||
Internal invoke method, calls `invoke()` after some prep.
|
||||
Handles optional fields that are required to call `invoke()` and invocation cache.
|
||||
"""
|
||||
for field_name, field in type(self).model_fields.items():
|
||||
for field_name, field in self.model_fields.items():
|
||||
if not field.json_schema_extra or callable(field.json_schema_extra):
|
||||
# something has gone terribly awry, we should always have this and it should be a dict
|
||||
continue
|
||||
@@ -227,9 +273,9 @@ class BaseInvocation(ABC, BaseModel):
|
||||
setattr(self, field_name, orig_default)
|
||||
if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None:
|
||||
if input_ == Input.Connection:
|
||||
raise RequiredConnectionException(type(self).model_fields["type"].default, field_name)
|
||||
raise RequiredConnectionException(self.model_fields["type"].default, field_name)
|
||||
elif input_ == Input.Any:
|
||||
raise MissingInputException(type(self).model_fields["type"].default, field_name)
|
||||
raise MissingInputException(self.model_fields["type"].default, field_name)
|
||||
|
||||
# skip node cache codepath if it's disabled
|
||||
if services.configuration.node_cache_size == 0:
|
||||
@@ -259,9 +305,7 @@ class BaseInvocation(ABC, BaseModel):
|
||||
is_intermediate: bool = Field(
|
||||
default=False,
|
||||
description="Whether or not this is an intermediate invocation.",
|
||||
json_schema_extra=InputFieldJSONSchemaExtra(
|
||||
input=Input.Direct, field_kind=FieldKind.NodeAttribute, ui_type=UIType._IsIntermediate
|
||||
).model_dump(exclude_none=True),
|
||||
json_schema_extra={"ui_type": "IsIntermediate", "field_kind": FieldKind.NodeAttribute},
|
||||
)
|
||||
use_cache: bool = Field(
|
||||
default=True,
|
||||
@@ -269,8 +313,6 @@ class BaseInvocation(ABC, BaseModel):
|
||||
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
|
||||
)
|
||||
|
||||
bottleneck: ClassVar[Bottleneck]
|
||||
|
||||
UIConfig: ClassVar[UIConfigBase]
|
||||
|
||||
model_config = ConfigDict(
|
||||
@@ -281,163 +323,21 @@ class BaseInvocation(ABC, BaseModel):
|
||||
coerce_numbers_to_str=True,
|
||||
)
|
||||
|
||||
_original_model_fields: ClassVar[dict[str, OriginalModelField]] = {}
|
||||
"""The original model fields, before any modifications were made by the @invocation decorator."""
|
||||
|
||||
|
||||
TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation)
|
||||
|
||||
|
||||
class InvocationRegistry:
|
||||
_invocation_classes: ClassVar[set[type[BaseInvocation]]] = set()
|
||||
_output_classes: ClassVar[set[type[BaseInvocationOutput]]] = set()
|
||||
|
||||
@classmethod
|
||||
def register_invocation(cls, invocation: type[BaseInvocation]) -> None:
|
||||
"""Registers an invocation."""
|
||||
|
||||
invocation_type = invocation.get_type()
|
||||
node_pack = invocation.UIConfig.node_pack
|
||||
|
||||
# Log a warning when an existing invocation is being clobbered by the one we are registering
|
||||
clobbered_invocation = InvocationRegistry.get_invocation_for_type(invocation_type)
|
||||
if clobbered_invocation is not None:
|
||||
# This should always be true - we just checked if the invocation type was in the set
|
||||
clobbered_node_pack = clobbered_invocation.UIConfig.node_pack
|
||||
|
||||
if clobbered_node_pack == "invokeai":
|
||||
# The invocation being clobbered is a core invocation
|
||||
logger.warning(f'Overriding core node "{invocation_type}" with node from "{node_pack}"')
|
||||
else:
|
||||
# The invocation being clobbered is a custom invocation
|
||||
logger.warning(
|
||||
f'Overriding node "{invocation_type}" from "{node_pack}" with node from "{clobbered_node_pack}"'
|
||||
)
|
||||
cls._invocation_classes.remove(clobbered_invocation)
|
||||
|
||||
cls._invocation_classes.add(invocation)
|
||||
cls.invalidate_invocation_typeadapter()
|
||||
|
||||
@classmethod
|
||||
@lru_cache(maxsize=1)
|
||||
def get_invocation_typeadapter(cls) -> TypeAdapter[Any]:
|
||||
"""Gets a pydantic TypeAdapter for the union of all invocation types.
|
||||
|
||||
This is used to parse serialized invocations into the correct invocation class.
|
||||
|
||||
This method is cached to avoid rebuilding the TypeAdapter on every access. If the invocation allowlist or
|
||||
denylist is changed, the cache should be cleared to ensure the TypeAdapter is updated and validation respects
|
||||
the updated allowlist and denylist.
|
||||
|
||||
@see https://docs.pydantic.dev/latest/concepts/type_adapter/
|
||||
"""
|
||||
return TypeAdapter(Annotated[Union[tuple(cls.get_invocation_classes())], Field(discriminator="type")])
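# Hedged, self-contained sketch (not InvokeAI code) of how a discriminated-union TypeAdapter
# like the one built above resolves serialized nodes by their "type" field. The node classes
# and field names below are hypothetical, for illustration only.
from typing import Annotated, Literal, Union
from pydantic import BaseModel, Field, TypeAdapter

class ExampleAddNode(BaseModel):
    type: Literal["example_add"] = "example_add"
    a: int = 0
    b: int = 0

class ExampleMultiplyNode(BaseModel):
    type: Literal["example_multiply"] = "example_multiply"
    a: int = 1
    b: int = 1

example_adapter = TypeAdapter(
    Annotated[Union[ExampleAddNode, ExampleMultiplyNode], Field(discriminator="type")]
)
example_node = example_adapter.validate_python({"type": "example_multiply", "a": 3, "b": 4})
assert isinstance(example_node, ExampleMultiplyNode)  # the "type" discriminator selects the right class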
|
||||
@classmethod
|
||||
def invalidate_invocation_typeadapter(cls) -> None:
|
||||
"""Invalidates the cached invocation type adapter."""
|
||||
cls.get_invocation_typeadapter.cache_clear()
|
||||
|
||||
@classmethod
|
||||
def get_invocation_classes(cls) -> Iterable[type[BaseInvocation]]:
|
||||
"""Gets all invocations, respecting the allowlist and denylist."""
|
||||
app_config = get_config()
|
||||
allowed_invocations: set[type[BaseInvocation]] = set()
|
||||
for sc in cls._invocation_classes:
|
||||
invocation_type = sc.get_type()
|
||||
is_in_allowlist = (
|
||||
invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True
|
||||
)
|
||||
is_in_denylist = (
|
||||
invocation_type in app_config.deny_nodes if isinstance(app_config.deny_nodes, list) else False
|
||||
)
|
||||
if is_in_allowlist and not is_in_denylist:
|
||||
allowed_invocations.add(sc)
|
||||
return allowed_invocations
|
||||
|
||||
@classmethod
|
||||
def get_invocations_map(cls) -> dict[str, type[BaseInvocation]]:
|
||||
"""Gets a map of all invocation types to their invocation classes."""
|
||||
return {i.get_type(): i for i in cls.get_invocation_classes()}
|
||||
|
||||
@classmethod
|
||||
def get_invocation_types(cls) -> Iterable[str]:
|
||||
"""Gets all invocation types."""
|
||||
return (i.get_type() for i in cls.get_invocation_classes())
|
||||
|
||||
@classmethod
|
||||
def get_invocation_for_type(cls, invocation_type: str) -> type[BaseInvocation] | None:
|
||||
"""Gets the invocation class for a given invocation type."""
|
||||
return cls.get_invocations_map().get(invocation_type)
|
||||
|
||||
@classmethod
|
||||
def register_output(cls, output: "type[TBaseInvocationOutput]") -> None:
|
||||
"""Registers an invocation output."""
|
||||
output_type = output.get_type()
|
||||
|
||||
# Log a warning when an existing invocation is being clobbered by the one we are registering
|
||||
clobbered_output = InvocationRegistry.get_output_for_type(output_type)
|
||||
if clobbered_output is not None:
|
||||
# TODO(psyche): We do not record the node pack of the output, so we cannot log it here
|
||||
logger.warning(f'Overriding invocation output "{output_type}"')
|
||||
cls._output_classes.remove(clobbered_output)
|
||||
|
||||
cls._output_classes.add(output)
|
||||
cls.invalidate_output_typeadapter()
|
||||
|
||||
@classmethod
|
||||
def get_output_classes(cls) -> Iterable[type[BaseInvocationOutput]]:
|
||||
"""Gets all invocation outputs."""
|
||||
return cls._output_classes
|
||||
|
||||
@classmethod
|
||||
def get_outputs_map(cls) -> dict[str, type[BaseInvocationOutput]]:
|
||||
"""Gets a map of all output types to their output classes."""
|
||||
return {i.get_type(): i for i in cls.get_output_classes()}
|
||||
|
||||
@classmethod
|
||||
@lru_cache(maxsize=1)
|
||||
def get_output_typeadapter(cls) -> TypeAdapter[Any]:
|
||||
"""Gets a pydantic TypeAdapter for the union of all invocation output types.
|
||||
|
||||
This is used to parse serialized invocation outputs into the correct invocation output class.
|
||||
|
||||
This method is cached to avoid rebuilding the TypeAdapter on every access. If the invocation allowlist or
|
||||
denylist is changed, the cache should be cleared to ensure the TypeAdapter is updated and validation respects
|
||||
the updated allowlist and denylist.
|
||||
|
||||
@see https://docs.pydantic.dev/latest/concepts/type_adapter/
|
||||
"""
|
||||
return TypeAdapter(Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")])
|
||||
|
||||
@classmethod
|
||||
def invalidate_output_typeadapter(cls) -> None:
|
||||
"""Invalidates the cached invocation output type adapter."""
|
||||
cls.get_output_typeadapter.cache_clear()
|
||||
|
||||
@classmethod
|
||||
def get_output_types(cls) -> Iterable[str]:
|
||||
"""Gets all invocation output types."""
|
||||
return (i.get_type() for i in cls.get_output_classes())
|
||||
|
||||
@classmethod
|
||||
def get_output_for_type(cls, output_type: str) -> type[BaseInvocationOutput] | None:
|
||||
"""Gets the output class for a given output type."""
|
||||
return cls.get_outputs_map().get(output_type)
RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = {
|
||||
"id",
|
||||
"is_intermediate",
|
||||
"use_cache",
|
||||
"type",
|
||||
"workflow",
|
||||
"bottleneck",
|
||||
}
|
||||
|
||||
RESERVED_INPUT_FIELD_NAMES = {"metadata", "board"}
|
||||
|
||||
RESERVED_OUTPUT_FIELD_NAMES = {"type", "output_meta"}
|
||||
RESERVED_OUTPUT_FIELD_NAMES = {"type"}
|
||||
|
||||
|
||||
class _Model(BaseModel):
|
||||
@@ -450,15 +350,6 @@ with warnings.catch_warnings():
|
||||
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
|
||||
|
||||
|
||||
def is_enum_member(value: Any, enum_class: type[Enum]) -> bool:
|
||||
"""Checks if a value is a member of an enum class."""
|
||||
try:
|
||||
enum_class(value)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
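# Illustrative usage with a throwaway enum (not InvokeAI's FieldKind): values that round-trip
# through the enum constructor count as members.
from enum import Enum

class _ExampleColor(str, Enum):
    RED = "red"

assert is_enum_member("red", _ExampleColor)
assert not is_enum_member("blue", _ExampleColor)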
|
||||
|
||||
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
|
||||
"""
|
||||
Validates the fields of an invocation or invocation output:
|
||||
@@ -470,144 +361,54 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
|
||||
"""
|
||||
for name, field in model_fields.items():
|
||||
if name in RESERVED_PYDANTIC_FIELD_NAMES:
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved by pydantic)")
|
||||
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved by pydantic)')
|
||||
|
||||
if not field.annotation:
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field type (missing annotation)")
|
||||
raise InvalidFieldError(f'Invalid field type "{name}" on "{model_type}" (missing annotation)')
|
||||
|
||||
if not isinstance(field.json_schema_extra, dict):
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field definition (missing json_schema_extra dict)")
|
||||
raise InvalidFieldError(
|
||||
f'Invalid field definition for "{name}" on "{model_type}" (missing json_schema_extra dict)'
|
||||
)
|
||||
|
||||
field_kind = field.json_schema_extra.get("field_kind", None)
|
||||
|
||||
# must have a field_kind
|
||||
if not is_enum_member(field_kind, FieldKind):
|
||||
if not isinstance(field_kind, FieldKind):
|
||||
raise InvalidFieldError(
|
||||
f"{model_type}.{name}: Invalid field definition for (maybe it's not an InputField or OutputField?)"
|
||||
f'Invalid field definition for "{name}" on "{model_type}" (maybe it\'s not an InputField or OutputField?)'
|
||||
)
|
||||
|
||||
if field_kind == FieldKind.Input.value and (
|
||||
if field_kind is FieldKind.Input and (
|
||||
name in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES or name in RESERVED_INPUT_FIELD_NAMES
|
||||
):
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved input field name)")
|
||||
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved input field name)')
|
||||
|
||||
if field_kind == FieldKind.Output.value and name in RESERVED_OUTPUT_FIELD_NAMES:
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved output field name)")
|
||||
if field_kind is FieldKind.Output and name in RESERVED_OUTPUT_FIELD_NAMES:
|
||||
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved output field name)')
|
||||
|
||||
if field_kind == FieldKind.Internal.value and name not in RESERVED_INPUT_FIELD_NAMES:
|
||||
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (internal field without reserved name)")
|
||||
if (field_kind is FieldKind.Internal) and name not in RESERVED_INPUT_FIELD_NAMES:
|
||||
raise InvalidFieldError(
|
||||
f'Invalid field name "{name}" on "{model_type}" (internal field without reserved name)'
|
||||
)
|
||||
|
||||
# node attribute fields *must* be in the reserved list
|
||||
if (
|
||||
field_kind == FieldKind.NodeAttribute.value
|
||||
field_kind is FieldKind.NodeAttribute
|
||||
and name not in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES
|
||||
and name not in RESERVED_OUTPUT_FIELD_NAMES
|
||||
):
|
||||
raise InvalidFieldError(
|
||||
f"{model_type}.{name}: Invalid field name (node attribute field without reserved name)"
|
||||
f'Invalid field name "{name}" on "{model_type}" (node attribute field without reserved name)'
|
||||
)
|
||||
|
||||
ui_type = field.json_schema_extra.get("ui_type", None)
|
||||
ui_model_base = field.json_schema_extra.get("ui_model_base", None)
|
||||
ui_model_type = field.json_schema_extra.get("ui_model_type", None)
|
||||
ui_model_variant = field.json_schema_extra.get("ui_model_variant", None)
|
||||
ui_model_format = field.json_schema_extra.get("ui_model_format", None)
|
||||
|
||||
if ui_type is not None:
|
||||
# There are 3 cases where we may need to take action:
|
||||
#
|
||||
# 1. The ui_type is a migratable, deprecated value. For example, ui_type=UIType.MainModel value is
|
||||
# deprecated and should be migrated to:
|
||||
# - ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]
|
||||
# - ui_model_type=[ModelType.Main]
|
||||
#
|
||||
# 2. ui_type was set in conjunction with any of the new ui_model_[base|type|variant|format] fields, which
|
||||
# is not allowed (they are mutually exclusive). In this case, we ignore ui_type and log a warning.
|
||||
#
|
||||
# 3. ui_type is a deprecated value that is not migratable. For example, ui_type=UIType.Image is deprecated;
|
||||
# Image fields are now automatically detected based on the field's type annotation. In this case, we
|
||||
# ignore ui_type and log a warning.
|
||||
#
|
||||
# The cases must be checked in this order to ensure proper handling.
|
||||
|
||||
# Easier to work with as an enum
|
||||
ui_type = UIType(ui_type)
|
||||
|
||||
# The enum member values are not always the same as their names - we want to log the name so the user can
|
||||
# easily review their code and see where the deprecated enum member is used.
|
||||
human_readable_name = f"UIType.{ui_type.name}"
|
||||
|
||||
# Case 1: migratable deprecated value
|
||||
did_migrate = migrate_model_ui_type(ui_type, field.json_schema_extra)
|
||||
|
||||
if did_migrate:
|
||||
logger.warning(
|
||||
f'{model_type}.{name}: Migrated deprecated "ui_type" "{human_readable_name}" to new ui_model_[base|type|variant|format] fields'
|
||||
)
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
|
||||
# Case 2: mutually exclusive with new fields
|
||||
elif (
|
||||
ui_model_base is not None
|
||||
or ui_model_type is not None
|
||||
or ui_model_variant is not None
|
||||
or ui_model_format is not None
|
||||
):
|
||||
logger.warning(
|
||||
f'{model_type}.{name}: "ui_type" is mutually exclusive with "ui_model_[base|type|format|variant]", ignoring "ui_type"'
|
||||
)
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
|
||||
# Case 3: deprecated value that is not migratable
|
||||
elif ui_type.startswith("DEPRECATED_"):
|
||||
logger.warning(f'{model_type}.{name}: Deprecated "ui_type" "{human_readable_name}", ignoring')
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
|
||||
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
|
||||
logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
return None
|
||||
|
||||
|
||||
class NoDefaultSentinel:
|
||||
pass
|
||||
|
||||
|
||||
def validate_field_default(
|
||||
cls_name: str, field_name: str, invocation_type: str, annotation: Any, field_info: FieldInfo
|
||||
) -> None:
|
||||
"""Validates the default value of a field against its pydantic field definition."""
|
||||
|
||||
assert isinstance(field_info.json_schema_extra, dict), "json_schema_extra is not a dict"
|
||||
|
||||
# By the time we are doing this, we've already done some pydantic magic by overriding the original default value.
|
||||
# We store the original default value in the json_schema_extra dict, so we can validate it here.
|
||||
orig_default = field_info.json_schema_extra.get("orig_default", NoDefaultSentinel)
|
||||
|
||||
if orig_default is NoDefaultSentinel:
|
||||
return
|
||||
|
||||
# To validate the default value, we can create a temporary pydantic model with the field we are validating as its
|
||||
# only field. Then validate the default value against this temporary model.
|
||||
TempDefaultValidator = cast(BaseModel, create_model(cls_name, **{field_name: (annotation, field_info)}))
|
||||
|
||||
try:
|
||||
TempDefaultValidator.model_validate({field_name: orig_default})
|
||||
except Exception as e:
|
||||
raise InvalidFieldError(
|
||||
f'Default value for field "{field_name}" on invocation "{invocation_type}" is invalid, {e}'
|
||||
) from e
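# Minimal sketch (illustrative, not part of the diff) of the temporary-model trick used above:
# wrap a single field in a throwaway model, then validate the candidate default against it.
# The field name and constraint are hypothetical; create_model and Field are the pydantic helpers
# already imported by this module.
from pydantic import ValidationError

ExampleValidator = create_model("ExampleValidator", steps=(int, Field(ge=1)))
try:
    ExampleValidator.model_validate({"steps": 0})  # violates ge=1, so this raises
except ValidationError as exc:
    print(f"Invalid default: {exc}")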
|
||||
|
||||
def is_optional(annotation: Any) -> bool:
|
||||
"""
|
||||
Checks if the given annotation is optional (i.e. Optional[X], Union[X, None] or X | None).
|
||||
"""
|
||||
origin = typing.get_origin(annotation)
|
||||
# PEP 604 unions (int|None) have origin types.UnionType
|
||||
is_union = origin is typing.Union or origin is types.UnionType
|
||||
if not is_union:
|
||||
return False
|
||||
return any(arg is type(None) for arg in typing.get_args(annotation))
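# Quick sanity checks for is_optional() (illustrative): typing.Optional and PEP 604 unions that
# include None are accepted; unions without None and bare types are not.
assert is_optional(typing.Optional[int])
assert is_optional(int | None)
assert not is_optional(int | str)
assert not is_optional(int)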
|
||||
|
||||
def invocation(
|
||||
invocation_type: str,
|
||||
title: Optional[str] = None,
|
||||
@@ -616,7 +417,6 @@ def invocation(
|
||||
version: Optional[str] = None,
|
||||
use_cache: Optional[bool] = True,
|
||||
classification: Classification = Classification.Stable,
|
||||
bottleneck: Bottleneck = Bottleneck.GPU,
|
||||
) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]:
|
||||
"""
|
||||
Registers an invocation.
|
||||
@@ -628,7 +428,6 @@ def invocation(
|
||||
:param Optional[str] version: Adds a version to the invocation. Must be a valid semver string. Defaults to None.
|
||||
:param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor.
|
||||
:param Classification classification: The classification of the invocation. Defaults to FeatureClassification.Stable. Use Beta or Prototype if the invocation is unstable.
|
||||
:param Bottleneck bottleneck: The bottleneck of the invocation. Defaults to Bottleneck.GPU. Use Network if the invocation is network-bound.
|
||||
"""
|
||||
|
||||
def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
|
||||
@@ -637,38 +436,19 @@ def invocation(
|
||||
if re.compile(r"^\S+$").match(invocation_type) is None:
|
||||
raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"')
|
||||
|
||||
# The node pack is the module name - will be "invokeai" for built-in nodes
|
||||
node_pack = cls.__module__.split(".")[0]
|
||||
if invocation_type in BaseInvocation.get_invocation_types():
|
||||
raise ValueError(f'Invocation type "{invocation_type}" already exists')
|
||||
|
||||
validate_fields(cls.model_fields, invocation_type)
|
||||
|
||||
fields: dict[str, tuple[Any, FieldInfo]] = {}
|
||||
|
||||
original_model_fields: dict[str, OriginalModelField] = {}
|
||||
|
||||
for field_name, field_info in cls.model_fields.items():
|
||||
annotation = field_info.annotation
|
||||
assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
|
||||
assert isinstance(field_info.json_schema_extra, dict), (
|
||||
f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
|
||||
)
|
||||
|
||||
original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
|
||||
|
||||
validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)
|
||||
|
||||
if field_info.default is None and not is_optional(annotation):
|
||||
annotation = annotation | None
|
||||
|
||||
fields[field_name] = (annotation, field_info)
|
||||
|
||||
# Add OpenAPI schema extras
|
||||
uiconfig: dict[str, Any] = {}
|
||||
uiconfig["title"] = title
|
||||
uiconfig["tags"] = tags
|
||||
uiconfig["category"] = category
|
||||
uiconfig["classification"] = classification
|
||||
uiconfig["node_pack"] = node_pack
|
||||
# The node pack is the module name - will be "invokeai" for built-in nodes
|
||||
uiconfig["node_pack"] = cls.__module__.split(".")[0]
|
||||
|
||||
if version is not None:
|
||||
try:
|
||||
@@ -677,7 +457,7 @@ def invocation(
|
||||
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
|
||||
uiconfig["version"] = version
|
||||
else:
|
||||
logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
uiconfig["version"] = "1.0.0"
|
||||
|
||||
cls.UIConfig = UIConfigBase(**uiconfig)
|
||||
@@ -685,8 +465,6 @@ def invocation(
|
||||
if use_cache is not None:
|
||||
cls.model_fields["use_cache"].default = use_cache
|
||||
|
||||
cls.bottleneck = bottleneck
|
||||
|
||||
# Add the invocation type to the model.
|
||||
|
||||
# You'd be tempted to just add the type field and rebuild the model, like this:
|
||||
@@ -696,55 +474,24 @@ def invocation(
|
||||
# Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does
|
||||
# not work. Instead, we have to create a new class with the type field and patch the original class with it.
|
||||
|
||||
invocation_type_annotation = Literal[invocation_type]
|
||||
|
||||
# Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any.
|
||||
# This cast makes the type annotation match the class's true type.
|
||||
invocation_type_field_info = cast(
|
||||
FieldInfo,
|
||||
Field(title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}),
|
||||
invocation_type_annotation = Literal[invocation_type] # type: ignore
|
||||
invocation_type_field = Field(
|
||||
title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
|
||||
)
|
||||
|
||||
fields["type"] = (invocation_type_annotation, invocation_type_field_info)
|
||||
|
||||
# Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
|
||||
# output is registered _after_ this invocation is registered. It depends on module import ordering.
|
||||
#
|
||||
# We can only confirm the output for an invocation is registered after all modules are imported. There's
|
||||
# only really one good time to do that - during application startup, in `run_app.py`, after loading all
|
||||
# custom nodes.
|
||||
#
|
||||
# We can still do some basic validation here - ensure the invoke method is defined and returns an instance
|
||||
# of BaseInvocationOutput.
|
||||
|
||||
# Validate the `invoke()` method is implemented
|
||||
if "invoke" in cls.__abstractmethods__:
|
||||
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
|
||||
|
||||
# And validate that `invoke()` returns a subclass of `BaseInvocationOutput`
|
||||
invoke_return_annotation = signature(cls.invoke).return_annotation
|
||||
|
||||
try:
|
||||
# TODO(psyche): If `invoke()` is not defined, `return_annotation` ends up as the string "BaseInvocationOutput"
|
||||
# instead of the class `BaseInvocationOutput`. This may be a pydantic bug: https://github.com/pydantic/pydantic/issues/7978
|
||||
if isinstance(invoke_return_annotation, str):
|
||||
invoke_return_annotation = getattr(sys.modules[cls.__module__], invoke_return_annotation)
|
||||
|
||||
assert invoke_return_annotation is not BaseInvocationOutput
|
||||
assert issubclass(invoke_return_annotation, BaseInvocationOutput)
|
||||
except Exception:
|
||||
raise ValueError(
|
||||
f'Invocation "{invocation_type}" must have a return annotation of a subclass of BaseInvocationOutput (got "{invoke_return_annotation}")'
|
||||
)
|
||||
|
||||
docstring = cls.__doc__
|
||||
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) # type: ignore
|
||||
new_class.__doc__ = docstring
|
||||
new_class._original_model_fields = original_model_fields
|
||||
cls = create_model(
|
||||
cls.__qualname__,
|
||||
__base__=cls,
|
||||
__module__=cls.__module__,
|
||||
type=(invocation_type_annotation, invocation_type_field),
|
||||
)
|
||||
cls.__doc__ = docstring
|
||||
|
||||
InvocationRegistry.register_invocation(new_class)
|
||||
# TODO: how to type this correctly? it's typed as ModelMetaclass, a private class in pydantic
|
||||
BaseInvocation.register_invocation(cls) # type: ignore
|
||||
|
||||
return new_class
|
||||
return cls
|
||||
|
||||
return wrapper
|
||||
|
||||
@@ -767,41 +514,29 @@ def invocation_output(
|
||||
if re.compile(r"^\S+$").match(output_type) is None:
|
||||
raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"')
|
||||
|
||||
if output_type in BaseInvocationOutput.get_output_types():
|
||||
raise ValueError(f'Invocation type "{output_type}" already exists')
|
||||
|
||||
validate_fields(cls.model_fields, output_type)
|
||||
|
||||
fields: dict[str, tuple[Any, FieldInfo]] = {}
|
||||
|
||||
for field_name, field_info in cls.model_fields.items():
|
||||
annotation = field_info.annotation
|
||||
assert annotation is not None, f"{field_name} on invocation output {output_type} has no type annotation."
|
||||
assert isinstance(field_info.json_schema_extra, dict), (
|
||||
f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
|
||||
)
|
||||
|
||||
cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
|
||||
|
||||
if field_info.default is not PydanticUndefined and is_optional(annotation):
|
||||
annotation = annotation | None
|
||||
fields[field_name] = (annotation, field_info)
|
||||
|
||||
# Add the output type to the model.
|
||||
output_type_annotation = Literal[output_type]
|
||||
|
||||
# Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any.
|
||||
# This cast makes the type annotation match the class's true type.
|
||||
output_type_field_info = cast(
|
||||
FieldInfo,
|
||||
Field(title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}),
|
||||
output_type_annotation = Literal[output_type] # type: ignore
|
||||
output_type_field = Field(
|
||||
title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
|
||||
)
|
||||
|
||||
fields["type"] = (output_type_annotation, output_type_field_info)
|
||||
|
||||
docstring = cls.__doc__
|
||||
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields)
|
||||
new_class.__doc__ = docstring
|
||||
cls = create_model(
|
||||
cls.__qualname__,
|
||||
__base__=cls,
|
||||
__module__=cls.__module__,
|
||||
type=(output_type_annotation, output_type_field),
|
||||
)
|
||||
cls.__doc__ = docstring
|
||||
|
||||
InvocationRegistry.register_output(new_class)
|
||||
BaseInvocationOutput.register_output(cls) # type: ignore # TODO: how to type this correctly?
|
||||
|
||||
return new_class
|
||||
return cls
|
||||
|
||||
return wrapper
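# Hedged usage sketch (not part of this diff): how a custom node pack might combine the two
# decorators defined above. The node name, fields, and arithmetic are hypothetical, and the
# sketch assumes InputField, OutputField, and InvocationContext are imported as in the node
# modules shown further below.
@invocation_output("example_sum_output")
class ExampleSumOutput(BaseInvocationOutput):
    total: int = OutputField(description="The sum of the two inputs")


@invocation(
    "example_sum",
    title="Example Sum",
    tags=["example", "math"],
    category="math",
    version="1.0.0",
)
class ExampleSumInvocation(BaseInvocation):
    """Adds two integers (illustrative custom node)."""

    a: int = InputField(default=0, description="First addend")
    b: int = InputField(default=0, description="Second addend")

    def invoke(self, context: InvocationContext) -> ExampleSumOutput:
        return ExampleSumOutput(total=self.a + self.b)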
|
||||
@@ -1,270 +0,0 @@
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import (
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
OutputField,
|
||||
)
|
||||
from invokeai.app.invocations.primitives import (
|
||||
FloatOutput,
|
||||
ImageOutput,
|
||||
IntegerOutput,
|
||||
StringOutput,
|
||||
)
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
|
||||
BATCH_GROUP_IDS = Literal[
|
||||
"None",
|
||||
"Group 1",
|
||||
"Group 2",
|
||||
"Group 3",
|
||||
"Group 4",
|
||||
"Group 5",
|
||||
]
|
||||
|
||||
|
||||
class NotExecutableNodeError(Exception):
|
||||
def __init__(self, message: str = "This class should never be executed or instantiated directly."):
|
||||
super().__init__(message)
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class BaseBatchInvocation(BaseInvocation):
|
||||
batch_group_id: BATCH_GROUP_IDS = InputField(
|
||||
default="None",
|
||||
description="The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size.",
|
||||
input=Input.Direct,
|
||||
title="Batch Group",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation(
|
||||
"image_batch",
|
||||
title="Image Batch",
|
||||
tags=["primitives", "image", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class ImageBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each image in the batch."""
|
||||
|
||||
images: list[ImageField] = InputField(
|
||||
min_length=1,
|
||||
description="The images to batch over",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation_output("image_generator_output")
|
||||
class ImageGeneratorOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a collection of boards"""
|
||||
|
||||
images: list[ImageField] = OutputField(description="The generated images")
|
||||
|
||||
|
||||
class ImageGeneratorField(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
@invocation(
|
||||
"image_generator",
|
||||
title="Image Generator",
|
||||
tags=["primitives", "board", "image", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class ImageGenerator(BaseInvocation):
|
||||
"""Generated a collection of images for use in a batched generation"""
|
||||
|
||||
generator: ImageGeneratorField = InputField(
|
||||
description="The image generator.",
|
||||
input=Input.Direct,
|
||||
title="Generator Type",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageGeneratorOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation(
|
||||
"string_batch",
|
||||
title="String Batch",
|
||||
tags=["primitives", "string", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class StringBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each string in the batch."""
|
||||
|
||||
strings: list[str] = InputField(
|
||||
min_length=1,
|
||||
description="The strings to batch over",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> StringOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation_output("string_generator_output")
|
||||
class StringGeneratorOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a collection of strings"""
|
||||
|
||||
strings: list[str] = OutputField(description="The generated strings")
|
||||
|
||||
|
||||
class StringGeneratorField(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
@invocation(
|
||||
"string_generator",
|
||||
title="String Generator",
|
||||
tags=["primitives", "string", "number", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class StringGenerator(BaseInvocation):
|
||||
"""Generated a range of strings for use in a batched generation"""
|
||||
|
||||
generator: StringGeneratorField = InputField(
|
||||
description="The string generator.",
|
||||
input=Input.Direct,
|
||||
title="Generator Type",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
def invoke(self, context: InvocationContext) -> StringGeneratorOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation(
|
||||
"integer_batch",
|
||||
title="Integer Batch",
|
||||
tags=["primitives", "integer", "number", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class IntegerBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each integer in the batch."""
|
||||
|
||||
integers: list[int] = InputField(
|
||||
min_length=1,
|
||||
description="The integers to batch over",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> IntegerOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation_output("integer_generator_output")
|
||||
class IntegerGeneratorOutput(BaseInvocationOutput):
|
||||
integers: list[int] = OutputField(description="The generated integers")
|
||||
|
||||
|
||||
class IntegerGeneratorField(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
@invocation(
|
||||
"integer_generator",
|
||||
title="Integer Generator",
|
||||
tags=["primitives", "int", "number", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class IntegerGenerator(BaseInvocation):
|
||||
"""Generated a range of integers for use in a batched generation"""
|
||||
|
||||
generator: IntegerGeneratorField = InputField(
|
||||
description="The integer generator.",
|
||||
input=Input.Direct,
|
||||
title="Generator Type",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
def invoke(self, context: InvocationContext) -> IntegerGeneratorOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation(
|
||||
"float_batch",
|
||||
title="Float Batch",
|
||||
tags=["primitives", "float", "number", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class FloatBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each float in the batch."""
|
||||
|
||||
floats: list[float] = InputField(
|
||||
min_length=1,
|
||||
description="The floats to batch over",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FloatOutput:
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
|
||||
@invocation_output("float_generator_output")
|
||||
class FloatGeneratorOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a collection of floats"""
|
||||
|
||||
floats: list[float] = OutputField(description="The generated floats")
|
||||
|
||||
|
||||
class FloatGeneratorField(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
@invocation(
|
||||
"float_generator",
|
||||
title="Float Generator",
|
||||
tags=["primitives", "float", "number", "batch", "special"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class FloatGenerator(BaseInvocation):
|
||||
"""Generated a range of floats for use in a batched generation"""
|
||||
|
||||
generator: FloatGeneratorField = InputField(
|
||||
description="The float generator.",
|
||||
input=Input.Direct,
|
||||
title="Generator Type",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
raise NotExecutableNodeError()
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FloatGeneratorOutput:
|
||||
raise NotExecutableNodeError()
|
||||
@@ -1,120 +1,98 @@
|
||||
from typing import Optional, Union
|
||||
from typing import Any, Union
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
from PIL import Image
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, LatentsField
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
def slerp(
|
||||
t: Union[float, np.ndarray],
|
||||
v0: Union[torch.Tensor, np.ndarray],
|
||||
v1: Union[torch.Tensor, np.ndarray],
|
||||
device: torch.device,
|
||||
DOT_THRESHOLD: float = 0.9995,
|
||||
):
|
||||
"""
|
||||
Spherical linear interpolation
|
||||
Args:
|
||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
||||
v0 (np.ndarray): Starting vector
|
||||
v1 (np.ndarray): Final vector
|
||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
||||
colineal. Not recommended to alter this.
|
||||
Returns:
|
||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
||||
"""
|
||||
inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v0 = v0.detach().cpu().numpy()
|
||||
if not isinstance(v1, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v1 = v1.detach().cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2 = torch.from_numpy(v2).to(device)
|
||||
|
||||
return v2
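# Tiny worked example (illustrative) of the spherical interpolation above: halfway between two
# orthogonal unit vectors, both s0 and s1 equal sin(pi/4)/sin(pi/2) ~= 0.7071, so the result keeps
# unit norm instead of shrinking as plain linear interpolation would.
example_v0 = torch.tensor([1.0, 0.0])
example_v1 = torch.tensor([0.0, 1.0])
example_mid = slerp(0.5, example_v0, example_v1, torch.device("cpu"))
# example_mid ~= tensor([0.7071, 0.7071]); its norm is ~1.0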
|
||||
|
||||
@invocation(
|
||||
"lblend",
|
||||
title="Blend Latents",
|
||||
tags=["latents", "blend", "mask"],
|
||||
tags=["latents", "blend"],
|
||||
category="latents",
|
||||
version="1.1.0",
|
||||
version="1.0.3",
|
||||
)
|
||||
class BlendLatentsInvocation(BaseInvocation):
|
||||
"""Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
|
||||
Latents must have same size. Masking functionality added by @dwringer."""
|
||||
"""Blend two latents using a given alpha. Latents must have same size."""
|
||||
|
||||
latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
|
||||
latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
|
||||
mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
|
||||
alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
|
||||
|
||||
def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
|
||||
if mask_image.mode != "L":
|
||||
mask_image = mask_image.convert("L")
|
||||
mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
|
||||
if mask_tensor.dim() == 3:
|
||||
mask_tensor = mask_tensor.unsqueeze(0)
|
||||
return mask_tensor
|
||||
|
||||
def replace_tensor_from_masked_tensor(
|
||||
self, tensor: torch.Tensor, other_tensor: torch.Tensor, mask_tensor: torch.Tensor
|
||||
):
|
||||
output = tensor.clone()
|
||||
mask_tensor = mask_tensor.expand(output.shape)
|
||||
if output.dtype != torch.float16:
|
||||
output = torch.add(output, mask_tensor * torch.sub(other_tensor, tensor))
|
||||
else:
|
||||
output = torch.add(output, mask_tensor.half() * torch.sub(other_tensor, tensor))
|
||||
return output
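# Illustrative arithmetic for the masked replacement above (hypothetical values):
# with tensor = [1, 1], other_tensor = [5, 9] and mask_tensor = [0, 1],
#     output = tensor + mask * (other_tensor - tensor) = [1, 9]
# so masked (1.0) positions take values from other_tensor and unmasked (0.0) positions are unchanged.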
latents_a: LatentsField = InputField(
|
||||
description=FieldDescriptions.latents,
|
||||
input=Input.Connection,
|
||||
)
|
||||
latents_b: LatentsField = InputField(
|
||||
description=FieldDescriptions.latents,
|
||||
input=Input.Connection,
|
||||
)
|
||||
alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
latents_a = context.tensors.load(self.latents_a.latents_name)
|
||||
latents_b = context.tensors.load(self.latents_b.latents_name)
|
||||
if self.mask is None:
|
||||
mask_tensor = torch.zeros(latents_a.shape[-2:])
|
||||
else:
|
||||
mask_tensor = self.prep_mask_tensor(context.images.get_pil(self.mask.image_name))
|
||||
mask_tensor = tv_resize(mask_tensor, latents_a.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
|
||||
|
||||
latents_b = self.replace_tensor_from_masked_tensor(latents_b, latents_a, mask_tensor)
|
||||
|
||||
if latents_a.shape != latents_b.shape:
|
||||
raise ValueError("Latents to blend must be the same size.")
|
||||
raise Exception("Latents to blend must be the same size.")
|
||||
|
||||
device = TorchDevice.choose_torch_device()
|
||||
|
||||
def slerp(
|
||||
t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here?
|
||||
v0: Union[torch.Tensor, npt.NDArray[Any]],
|
||||
v1: Union[torch.Tensor, npt.NDArray[Any]],
|
||||
DOT_THRESHOLD: float = 0.9995,
|
||||
) -> Union[torch.Tensor, npt.NDArray[Any]]:
|
||||
"""
|
||||
Spherical linear interpolation
|
||||
Args:
|
||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
||||
v0 (np.ndarray): Starting vector
|
||||
v1 (np.ndarray): Final vector
|
||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
||||
collinear. Not recommended to alter this.
|
||||
Returns:
|
||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
||||
"""
|
||||
inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v0 = v0.detach().cpu().numpy()
|
||||
if not isinstance(v1, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v1 = v1.detach().cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
|
||||
return v2_torch
|
||||
else:
|
||||
assert isinstance(v2, np.ndarray)
|
||||
return v2
|
||||
|
||||
# blend
|
||||
blended_latents = slerp(self.alpha, latents_a, latents_b, device)
|
||||
bl = slerp(self.alpha, latents_a, latents_b)
|
||||
assert isinstance(bl, torch.Tensor)
|
||||
blended_latents: torch.Tensor = bl # for type checking convenience
|
||||
|
||||
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
||||
blended_latents = blended_latents.to("cpu")
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
name = context.tensors.save(tensor=blended_latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=blended_latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
|
||||
|
||||
@@ -1,363 +0,0 @@
|
||||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
import torchvision.transforms as tv_transforms
|
||||
from diffusers.models.transformers.transformer_cogview4 import CogView4Transformer2DModel
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
CogView4ConditioningField,
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import TransformerField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
|
||||
from invokeai.backend.model_manager.config import BaseModelType
|
||||
from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import CogView4ConditioningInfo
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
@invocation(
|
||||
"cogview4_denoise",
|
||||
title="Denoise - CogView4",
|
||||
tags=["image", "cogview4"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class CogView4DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Run the denoising process with a CogView4 model."""
|
||||
|
||||
# If latents is provided, this means we are doing image-to-image.
|
||||
latents: Optional[LatentsField] = InputField(
|
||||
default=None, description=FieldDescriptions.latents, input=Input.Connection
|
||||
)
|
||||
# denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
|
||||
denoise_mask: Optional[DenoiseMaskField] = InputField(
|
||||
default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection
|
||||
)
|
||||
denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
|
||||
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
|
||||
transformer: TransformerField = InputField(
|
||||
description=FieldDescriptions.cogview4_model, input=Input.Connection, title="Transformer"
|
||||
)
|
||||
positive_conditioning: CogView4ConditioningField = InputField(
|
||||
description=FieldDescriptions.positive_cond, input=Input.Connection
|
||||
)
|
||||
negative_conditioning: CogView4ConditioningField = InputField(
|
||||
description=FieldDescriptions.negative_cond, input=Input.Connection
|
||||
)
|
||||
cfg_scale: float | list[float] = InputField(default=3.5, description=FieldDescriptions.cfg_scale, title="CFG Scale")
|
||||
width: int = InputField(default=1024, multiple_of=32, description="Width of the generated image.")
|
||||
height: int = InputField(default=1024, multiple_of=32, description="Height of the generated image.")
|
||||
steps: int = InputField(default=25, gt=0, description=FieldDescriptions.steps)
|
||||
seed: int = InputField(default=0, description="Randomness seed for reproducibility.")
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
latents = self._run_diffusion(context)
|
||||
latents = latents.detach().to("cpu")
|
||||
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
|
||||
def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
|
||||
"""Prepare the inpaint mask.
|
||||
- Loads the mask
|
||||
- Resizes if necessary
|
||||
- Casts to same device/dtype as latents
|
||||
|
||||
Args:
|
||||
context (InvocationContext): The invocation context, for loading the inpaint mask.
|
||||
latents (torch.Tensor): A latent image tensor. Used to determine the target shape, device, and dtype for the
|
||||
inpaint mask.
|
||||
|
||||
Returns:
|
||||
torch.Tensor | None: Inpaint mask. Values of 0.0 represent the regions to be fully denoised, and 1.0
|
||||
represent the regions to be preserved.
|
||||
"""
|
||||
if self.denoise_mask is None:
|
||||
return None
|
||||
mask = context.tensors.load(self.denoise_mask.mask_name)
|
||||
|
||||
# The input denoise_mask contains values in [0, 1], where 0.0 represents the regions to be fully denoised, and
|
||||
# 1.0 represents the regions to be preserved.
|
||||
# We invert the mask so that the regions to be preserved are 0.0 and the regions to be denoised are 1.0.
|
||||
mask = 1.0 - mask
|
||||
|
||||
_, _, latent_height, latent_width = latents.shape
|
||||
mask = tv_resize(
|
||||
img=mask,
|
||||
size=[latent_height, latent_width],
|
||||
interpolation=tv_transforms.InterpolationMode.BILINEAR,
|
||||
antialias=False,
|
||||
)
|
||||
|
||||
mask = mask.to(device=latents.device, dtype=latents.dtype)
|
||||
return mask
|
||||
|
||||
def _load_text_conditioning(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
conditioning_name: str,
|
||||
dtype: torch.dtype,
|
||||
device: torch.device,
|
||||
) -> torch.Tensor:
|
||||
# Load the conditioning data.
|
||||
cond_data = context.conditioning.load(conditioning_name)
|
||||
assert len(cond_data.conditionings) == 1
|
||||
cogview4_conditioning = cond_data.conditionings[0]
|
||||
assert isinstance(cogview4_conditioning, CogView4ConditioningInfo)
|
||||
cogview4_conditioning = cogview4_conditioning.to(dtype=dtype, device=device)
|
||||
|
||||
return cogview4_conditioning.glm_embeds
|
||||
|
||||
def _get_noise(
|
||||
self,
|
||||
batch_size: int,
|
||||
num_channels_latents: int,
|
||||
height: int,
|
||||
width: int,
|
||||
dtype: torch.dtype,
|
||||
device: torch.device,
|
||||
seed: int,
|
||||
) -> torch.Tensor:
|
||||
# We always generate noise on the same device and dtype then cast to ensure consistency across devices/dtypes.
|
||||
rand_device = "cpu"
|
||||
rand_dtype = torch.float16
|
||||
|
||||
return torch.randn(
|
||||
batch_size,
|
||||
num_channels_latents,
|
||||
int(height) // LATENT_SCALE_FACTOR,
|
||||
int(width) // LATENT_SCALE_FACTOR,
|
||||
device=rand_device,
|
||||
dtype=rand_dtype,
|
||||
generator=torch.Generator(device=rand_device).manual_seed(seed),
|
||||
).to(device=device, dtype=dtype)
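# Minimal sketch (illustrative) of the same device-independent pattern: seed a CPU generator,
# draw in float16 on the CPU, then cast to the target device/dtype. The shape here is hypothetical.
example_generator = torch.Generator(device="cpu").manual_seed(123)
example_noise = torch.randn(1, 16, 4, 4, generator=example_generator, device="cpu", dtype=torch.float16)
example_noise = example_noise.to(device=TorchDevice.choose_torch_device(), dtype=torch.bfloat16)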
|
||||
def _prepare_cfg_scale(self, num_timesteps: int) -> list[float]:
|
||||
"""Prepare the CFG scale list.
|
||||
|
||||
Args:
|
||||
num_timesteps (int): The number of timesteps in the scheduler. Could be different from num_steps depending
|
||||
on the scheduler used (e.g. higher order schedulers).
|
||||
|
||||
Returns:
|
||||
list[float]: The CFG scale value to apply at each timestep.
|
||||
"""
|
||||
if isinstance(self.cfg_scale, float):
|
||||
cfg_scale = [self.cfg_scale] * num_timesteps
|
||||
elif isinstance(self.cfg_scale, list):
|
||||
assert len(self.cfg_scale) == num_timesteps
|
||||
cfg_scale = self.cfg_scale
|
||||
else:
|
||||
raise ValueError(f"Invalid CFG scale type: {type(self.cfg_scale)}")
|
||||
|
||||
return cfg_scale
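# Examples of the two accepted forms (illustrative): with num_timesteps = 4,
#     cfg_scale = 3.5                   -> [3.5, 3.5, 3.5, 3.5]
#     cfg_scale = [1.0, 2.0, 3.0, 4.0]  -> returned unchanged (length must equal num_timesteps)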
|
||||
def _convert_timesteps_to_sigmas(self, image_seq_len: int, timesteps: torch.Tensor) -> list[float]:
|
||||
# The logic to prepare the timestep / sigma schedule is based on:
|
||||
# https://github.com/huggingface/diffusers/blob/b38450d5d2e5b87d5ff7088ee5798c85587b9635/src/diffusers/pipelines/cogview4/pipeline_cogview4.py#L575-L595
|
||||
# The default FlowMatchEulerDiscreteScheduler configs are based on:
|
||||
# https://huggingface.co/THUDM/CogView4-6B/blob/fb6f57289c73ac6d139e8d81bd5a4602d1877847/scheduler/scheduler_config.json
|
||||
# This implementation differs slightly from the original for the sake of simplicity (differs in terminal value
|
||||
# handling, not quantizing timesteps to integers, etc.).
|
||||
|
||||
def calculate_timestep_shift(
|
||||
image_seq_len: int, base_seq_len: int = 256, base_shift: float = 0.25, max_shift: float = 0.75
|
||||
) -> float:
|
||||
m = (image_seq_len / base_seq_len) ** 0.5
|
||||
mu = m * max_shift + base_shift
|
||||
return mu
|
||||
|
||||
def time_shift_linear(mu: float, sigma: float, t: torch.Tensor) -> torch.Tensor:
|
||||
return mu / (mu + (1 / t - 1) ** sigma)
|
||||
|
||||
mu = calculate_timestep_shift(image_seq_len)
|
||||
sigmas = time_shift_linear(mu, 1.0, timesteps)
|
||||
return sigmas.tolist()
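# Worked numbers (illustrative, assuming LATENT_SCALE_FACTOR == 8 and patch_size == 2):
# a 1024x1024 image gives image_seq_len = (128 * 128) // 4 = 4096, so
#     mu = (4096 / 256) ** 0.5 * 0.75 + 0.25 = 3.25
# and the shifted sigma at t = 0.5 is 3.25 / (3.25 + (1 / 0.5 - 1)) = 3.25 / 4.25 ~= 0.76,
# i.e. the shifted schedule spends more of its steps at high noise levels than the unshifted t would.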
|
||||
def _run_diffusion(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
):
|
||||
inference_dtype = torch.bfloat16
|
||||
device = TorchDevice.choose_torch_device()
|
||||
|
||||
transformer_info = context.models.load(self.transformer.transformer)
|
||||
assert isinstance(transformer_info.model, CogView4Transformer2DModel)
|
||||
|
||||
# Load/process the conditioning data.
|
||||
# TODO(ryand): Make CFG optional.
|
||||
do_classifier_free_guidance = True
|
||||
pos_prompt_embeds = self._load_text_conditioning(
|
||||
context=context,
|
||||
conditioning_name=self.positive_conditioning.conditioning_name,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
)
|
||||
neg_prompt_embeds = self._load_text_conditioning(
|
||||
context=context,
|
||||
conditioning_name=self.negative_conditioning.conditioning_name,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
)
|
||||
|
||||
# Prepare misc. conditioning variables.
|
||||
# TODO(ryand): We could expose these as params (like with SDXL). But, we should experiment to see if they are
|
||||
# useful first.
|
||||
original_size = torch.tensor([(self.height, self.width)], dtype=pos_prompt_embeds.dtype, device=device)
|
||||
target_size = torch.tensor([(self.height, self.width)], dtype=pos_prompt_embeds.dtype, device=device)
|
||||
crops_coords_top_left = torch.tensor([(0, 0)], dtype=pos_prompt_embeds.dtype, device=device)
|
||||
|
||||
# Prepare the timestep / sigma schedule.
|
||||
patch_size = transformer_info.model.config.patch_size # type: ignore
|
||||
assert isinstance(patch_size, int)
|
||||
image_seq_len = ((self.height // LATENT_SCALE_FACTOR) * (self.width // LATENT_SCALE_FACTOR)) // (patch_size**2)
|
||||
# We add an extra step to the end to account for the final timestep of 0.0.
|
||||
timesteps: list[float] = torch.linspace(1, 0, self.steps + 1).tolist()
|
||||
# Clip the timesteps schedule based on denoising_start and denoising_end.
|
||||
timesteps = clip_timestep_schedule_fractional(timesteps, self.denoising_start, self.denoising_end)
|
||||
sigmas = self._convert_timesteps_to_sigmas(image_seq_len, torch.tensor(timesteps))
|
||||
total_steps = len(timesteps) - 1
|
||||
|
||||
# Prepare the CFG scale list.
|
||||
cfg_scale = self._prepare_cfg_scale(total_steps)
|
||||
|
||||
# Load the input latents, if provided.
|
||||
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
|
||||
if init_latents is not None:
|
||||
init_latents = init_latents.to(device=device, dtype=inference_dtype)
|
||||
|
||||
# Generate initial latent noise.
|
||||
num_channels_latents = transformer_info.model.config.in_channels # type: ignore
|
||||
assert isinstance(num_channels_latents, int)
|
||||
noise = self._get_noise(
|
||||
batch_size=1,
|
||||
num_channels_latents=num_channels_latents,
|
||||
height=self.height,
|
||||
width=self.width,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
seed=self.seed,
|
||||
)
|
||||
|
||||
# Prepare input latent image.
|
||||
if init_latents is not None:
|
||||
# Noise the init_latents by the appropriate amount for the first timestep.
|
||||
s_0 = sigmas[0]
|
||||
latents = s_0 * noise + (1.0 - s_0) * init_latents
|
||||
else:
|
||||
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
|
||||
if self.denoising_start > 1e-5:
|
||||
raise ValueError("denoising_start should be 0 when initial latents are not provided.")
|
||||
latents = noise
|
||||
|
||||
# If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
|
||||
# denoising steps.
|
||||
if len(timesteps) <= 1:
|
||||
return latents
|
||||
|
||||
# Prepare inpaint extension.
|
||||
inpaint_mask = self._prep_inpaint_mask(context, latents)
|
||||
inpaint_extension: RectifiedFlowInpaintExtension | None = None
|
||||
if inpaint_mask is not None:
|
||||
assert init_latents is not None
|
||||
inpaint_extension = RectifiedFlowInpaintExtension(
|
||||
init_latents=init_latents,
|
||||
inpaint_mask=inpaint_mask,
|
||||
noise=noise,
|
||||
)
|
||||
|
||||
step_callback = self._build_step_callback(context)
|
||||
|
||||
step_callback(
|
||||
PipelineIntermediateState(
|
||||
step=0,
|
||||
order=1,
|
||||
total_steps=total_steps,
|
||||
timestep=int(timesteps[0]),
|
||||
latents=latents,
|
||||
),
|
||||
)
|
||||
|
||||
with transformer_info.model_on_device() as (_, transformer):
|
||||
assert isinstance(transformer, CogView4Transformer2DModel)
|
||||
|
||||
# Denoising loop
|
||||
for step_idx in tqdm(range(total_steps)):
|
||||
t_curr = timesteps[step_idx]
|
||||
sigma_curr = sigmas[step_idx]
|
||||
sigma_prev = sigmas[step_idx + 1]
|
||||
|
||||
# Expand the timestep to match the latent model input.
|
||||
# Multiply by 1000 to match the default FlowMatchEulerDiscreteScheduler num_train_timesteps.
|
||||
timestep = torch.tensor([t_curr * 1000], device=device).expand(latents.shape[0])
|
||||
|
||||
# TODO(ryand): Support both sequential and batched CFG inference.
|
||||
noise_pred_cond = transformer(
|
||||
hidden_states=latents,
|
||||
encoder_hidden_states=pos_prompt_embeds,
|
||||
timestep=timestep,
|
||||
original_size=original_size,
|
||||
target_size=target_size,
|
||||
crop_coords=crops_coords_top_left,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
# Apply CFG.
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond = transformer(
|
||||
hidden_states=latents,
|
||||
encoder_hidden_states=neg_prompt_embeds,
|
||||
timestep=timestep,
|
||||
original_size=original_size,
|
||||
target_size=target_size,
|
||||
crop_coords=crops_coords_top_left,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
noise_pred = noise_pred_uncond + cfg_scale[step_idx] * (noise_pred_cond - noise_pred_uncond)
|
||||
else:
|
||||
noise_pred = noise_pred_cond
|
||||
|
||||
# Compute the previous noisy sample x_t -> x_t-1.
|
||||
latents_dtype = latents.dtype
|
||||
# TODO(ryand): Is casting to float32 necessary for precision/stability? I copied this from SD3.
|
||||
latents = latents.to(dtype=torch.float32)
|
||||
latents = latents + (sigma_prev - sigma_curr) * noise_pred
|
||||
latents = latents.to(dtype=latents_dtype)
|
||||
|
||||
if inpaint_extension is not None:
|
||||
latents = inpaint_extension.merge_intermediate_latents_with_init_latents(latents, sigma_prev)
|
||||
|
||||
step_callback(
|
||||
PipelineIntermediateState(
|
||||
step=step_idx + 1,
|
||||
order=1,
|
||||
total_steps=total_steps,
|
||||
timestep=int(t_curr),
|
||||
latents=latents,
|
||||
),
|
||||
)
|
||||
|
||||
return latents
|
||||
|
||||
def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
|
||||
def step_callback(state: PipelineIntermediateState) -> None:
|
||||
context.util.sd_step_callback(state, BaseModelType.CogView4)
|
||||
|
||||
return step_callback
|
||||
@@ -1,76 +0,0 @@
|
||||
import einops
|
||||
import torch
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import VAEField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.load.load_base import LoadedModel
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
|
||||
|
||||
# TODO(ryand): This is effectively a copy of SD3ImageToLatentsInvocation and a subset of ImageToLatentsInvocation. We
|
||||
# should refactor to avoid this duplication.
|
||||
|
||||
|
||||
@invocation(
|
||||
"cogview4_i2l",
|
||||
title="Image to Latents - CogView4",
|
||||
tags=["image", "latents", "vae", "i2l", "cogview4"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Generates latents from an image."""
|
||||
|
||||
image: ImageField = InputField(description="The image to encode.")
|
||||
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
|
||||
assert isinstance(vae_info.model, AutoencoderKL)
|
||||
estimated_working_memory = estimate_vae_working_memory_cogview4(
|
||||
operation="encode", image_tensor=image_tensor, vae=vae_info.model
|
||||
)
|
||||
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
|
||||
assert isinstance(vae, AutoencoderKL)
|
||||
|
||||
vae.disable_tiling()
|
||||
|
||||
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae.dtype)
|
||||
with torch.inference_mode():
|
||||
image_tensor_dist = vae.encode(image_tensor).latent_dist
|
||||
# TODO: Use seed to make sampling reproducible.
|
||||
latents: torch.Tensor = image_tensor_dist.sample().to(dtype=vae.dtype)
|
||||
|
||||
latents = vae.config.scaling_factor * latents
|
||||
|
||||
return latents
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
|
||||
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
||||
if image_tensor.dim() == 3:
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
assert isinstance(vae_info.model, AutoencoderKL)
|
||||
|
||||
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
@@ -1,79 +0,0 @@
|
||||
from contextlib import nullcontext
|
||||
|
||||
import torch
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from einops import rearrange
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import VAEField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
|
||||
|
||||
# TODO(ryand): This is effectively a copy of SD3LatentsToImageInvocation and a subset of LatentsToImageInvocation. We
|
||||
# should refactor to avoid this duplication.
|
||||
|
||||
|
||||
@invocation(
|
||||
"cogview4_l2i",
|
||||
title="Latents to Image - CogView4",
|
||||
tags=["latents", "image", "vae", "l2i", "cogview4"],
|
||||
category="latents",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class CogView4LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Generates an image from latents."""
|
||||
|
||||
latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
|
||||
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
latents = context.tensors.load(self.latents.latents_name)
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
assert isinstance(vae_info.model, (AutoencoderKL))
|
||||
estimated_working_memory = estimate_vae_working_memory_cogview4(
|
||||
operation="decode", image_tensor=latents, vae=vae_info.model
|
||||
)
|
||||
with (
|
||||
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
|
||||
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
|
||||
):
|
||||
context.util.signal_progress("Running VAE")
|
||||
assert isinstance(vae, (AutoencoderKL))
|
||||
latents = latents.to(TorchDevice.choose_torch_device())
|
||||
|
||||
vae.disable_tiling()
|
||||
|
||||
tiling_context = nullcontext()
|
||||
|
||||
# clear memory as vae decode can request a lot
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
with torch.inference_mode(), tiling_context:
|
||||
# copied from diffusers pipeline
|
||||
latents = latents / vae.config.scaling_factor
|
||||
img = vae.decode(latents, return_dict=False)[0]
|
||||
|
||||
img = img.clamp(-1, 1)
|
||||
img = rearrange(img[0], "c h w -> h w c") # noqa: F821
|
||||
img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())
|
||||
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
image_dto = context.images.save(image=img_pil)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
@@ -1,57 +0,0 @@
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
|
||||
from invokeai.app.invocations.model import (
|
||||
GlmEncoderField,
|
||||
ModelIdentifierField,
|
||||
TransformerField,
|
||||
VAEField,
|
||||
)
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.config import SubModelType
|
||||
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
|
||||
|
||||
|
||||
@invocation_output("cogview4_model_loader_output")
|
||||
class CogView4ModelLoaderOutput(BaseInvocationOutput):
|
||||
"""CogView4 base model loader output."""
|
||||
|
||||
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
|
||||
glm_encoder: GlmEncoderField = OutputField(description=FieldDescriptions.glm_encoder, title="GLM Encoder")
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
|
||||
|
||||
@invocation(
|
||||
"cogview4_model_loader",
|
||||
title="Main Model - CogView4",
|
||||
tags=["model", "cogview4"],
|
||||
category="model",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class CogView4ModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a CogView4 base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.cogview4_model,
|
||||
input=Input.Direct,
|
||||
ui_model_base=BaseModelType.CogView4,
|
||||
ui_model_type=ModelType.Main,
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> CogView4ModelLoaderOutput:
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
glm_tokenizer = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
glm_encoder = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
|
||||
return CogView4ModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
glm_encoder=GlmEncoderField(tokenizer=glm_tokenizer, text_encoder=glm_encoder),
|
||||
vae=VAEField(vae=vae),
|
||||
)
|
||||
@@ -1,92 +0,0 @@
|
||||
import torch
|
||||
from transformers import GlmModel, PreTrainedTokenizerFast
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, UIComponent
|
||||
from invokeai.app.invocations.model import GlmEncoderField
|
||||
from invokeai.app.invocations.primitives import CogView4ConditioningOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
|
||||
CogView4ConditioningInfo,
|
||||
ConditioningFieldData,
|
||||
)
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
# The CogView4 GLM Text Encoder max sequence length set based on the default in diffusers.
|
||||
COGVIEW4_GLM_MAX_SEQ_LEN = 1024
|
||||
|
||||
|
||||
@invocation(
|
||||
"cogview4_text_encoder",
|
||||
title="Prompt - CogView4",
|
||||
tags=["prompt", "conditioning", "cogview4"],
|
||||
category="conditioning",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class CogView4TextEncoderInvocation(BaseInvocation):
|
||||
"""Encodes and preps a prompt for a cogview4 image."""
|
||||
|
||||
prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea)
|
||||
glm_encoder: GlmEncoderField = InputField(
|
||||
title="GLM Encoder",
|
||||
description=FieldDescriptions.glm_encoder,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> CogView4ConditioningOutput:
|
||||
glm_embeds = self._glm_encode(context, max_seq_len=COGVIEW4_GLM_MAX_SEQ_LEN)
|
||||
conditioning_data = ConditioningFieldData(conditionings=[CogView4ConditioningInfo(glm_embeds=glm_embeds)])
|
||||
conditioning_name = context.conditioning.save(conditioning_data)
|
||||
return CogView4ConditioningOutput.build(conditioning_name)
|
||||
|
||||
def _glm_encode(self, context: InvocationContext, max_seq_len: int) -> torch.Tensor:
|
||||
prompt = [self.prompt]
|
||||
|
||||
# TODO(ryand): Add model inputs to the invocation rather than hard-coding.
|
||||
with (
|
||||
context.models.load(self.glm_encoder.text_encoder).model_on_device() as (_, glm_text_encoder),
|
||||
context.models.load(self.glm_encoder.tokenizer).model_on_device() as (_, glm_tokenizer),
|
||||
):
|
||||
context.util.signal_progress("Running GLM text encoder")
|
||||
assert isinstance(glm_text_encoder, GlmModel)
|
||||
assert isinstance(glm_tokenizer, PreTrainedTokenizerFast)
|
||||
|
||||
text_inputs = glm_tokenizer(
|
||||
prompt,
|
||||
padding="longest",
|
||||
max_length=max_seq_len,
|
||||
truncation=True,
|
||||
add_special_tokens=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
text_input_ids = text_inputs.input_ids
|
||||
untruncated_ids = glm_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
||||
assert isinstance(text_input_ids, torch.Tensor)
|
||||
assert isinstance(untruncated_ids, torch.Tensor)
|
||||
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
||||
text_input_ids, untruncated_ids
|
||||
):
|
||||
removed_text = glm_tokenizer.batch_decode(untruncated_ids[:, max_seq_len - 1 : -1])
|
||||
context.logger.warning(
|
||||
"The following part of your input was truncated because `max_sequence_length` is set to "
|
||||
f" {max_seq_len} tokens: {removed_text}"
|
||||
)
|
||||
|
||||
current_length = text_input_ids.shape[1]
|
||||
pad_length = (16 - (current_length % 16)) % 16
|
||||
if pad_length > 0:
|
||||
pad_ids = torch.full(
|
||||
(text_input_ids.shape[0], pad_length),
|
||||
fill_value=glm_tokenizer.pad_token_id,
|
||||
dtype=text_input_ids.dtype,
|
||||
device=text_input_ids.device,
|
||||
)
|
||||
text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
|
||||
prompt_embeds = glm_text_encoder(
|
||||
text_input_ids.to(TorchDevice.choose_torch_device()), output_hidden_states=True
|
||||
).hidden_states[-2]
|
||||
|
||||
assert isinstance(prompt_embeds, torch.Tensor)
|
||||
return prompt_embeds
|
||||
@@ -1,7 +1,7 @@
|
||||
from typing import Iterator, List, Optional, Tuple, Union, cast
|
||||
|
||||
import torch
|
||||
from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
|
||||
from compel import Compel, ReturnedEmbeddingsType
|
||||
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
|
||||
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
||||
|
||||
@@ -19,9 +19,9 @@ from invokeai.app.invocations.model import CLIPField
|
||||
from invokeai.app.invocations.primitives import ConditioningOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.ti_utils import generate_ti_list
|
||||
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
|
||||
from invokeai.backend.lora.lora_patcher import LoRAPatcher
|
||||
from invokeai.backend.model_patcher import ModelPatcher
|
||||
from invokeai.backend.patches.layer_patcher import LayerPatcher
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
|
||||
BasicConditioningInfo,
|
||||
ConditioningFieldData,
|
||||
@@ -40,10 +40,10 @@ from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
@invocation(
|
||||
"compel",
|
||||
title="Prompt - SD1.5",
|
||||
title="Prompt",
|
||||
tags=["prompt", "compel"],
|
||||
category="conditioning",
|
||||
version="1.2.1",
|
||||
version="1.2.0",
|
||||
)
|
||||
class CompelInvocation(BaseInvocation):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
@@ -63,28 +63,29 @@ class CompelInvocation(BaseInvocation):
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> ConditioningOutput:
|
||||
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
|
||||
tokenizer_info = context.models.load(self.clip.tokenizer)
|
||||
text_encoder_info = context.models.load(self.clip.text_encoder)
|
||||
|
||||
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
|
||||
for lora in self.clip.loras:
|
||||
lora_info = context.models.load(lora.lora)
|
||||
assert isinstance(lora_info.model, ModelPatchRaw)
|
||||
assert isinstance(lora_info.model, LoRAModelRaw)
|
||||
yield (lora_info.model, lora.weight)
|
||||
del lora_info
|
||||
return
|
||||
|
||||
# loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
|
||||
|
||||
text_encoder_info = context.models.load(self.clip.text_encoder)
|
||||
ti_list = generate_ti_list(self.prompt, text_encoder_info.config.base, context)
|
||||
|
||||
with (
|
||||
# apply all patches while the model is on the target device
|
||||
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
|
||||
context.models.load(self.clip.tokenizer) as tokenizer,
|
||||
LayerPatcher.apply_smart_model_patches(
|
||||
tokenizer_info as tokenizer,
|
||||
LoRAPatcher.apply_lora_patches(
|
||||
model=text_encoder,
|
||||
patches=_lora_loader(),
|
||||
prefix="lora_te_",
|
||||
dtype=text_encoder.dtype,
|
||||
cached_weights=cached_weights,
|
||||
),
|
||||
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
|
||||
@@ -94,7 +95,6 @@ class CompelInvocation(BaseInvocation):
|
||||
ti_manager,
|
||||
),
|
||||
):
|
||||
context.util.signal_progress("Building conditioning")
|
||||
assert isinstance(text_encoder, CLIPTextModel)
|
||||
assert isinstance(tokenizer, CLIPTokenizer)
|
||||
compel = Compel(
|
||||
@@ -103,8 +103,6 @@ class CompelInvocation(BaseInvocation):
|
||||
textual_inversion_manager=ti_manager,
|
||||
dtype_for_device_getter=TorchDevice.choose_torch_dtype,
|
||||
truncate_long_prompts=False,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(self.prompt)
|
||||
@@ -114,13 +112,6 @@ class CompelInvocation(BaseInvocation):
|
||||
|
||||
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del text_encoder_info
|
||||
|
||||
c = c.detach().to("cpu")
|
||||
|
||||
conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
|
||||
@@ -146,7 +137,9 @@ class SDXLPromptInvocationBase:
|
||||
lora_prefix: str,
|
||||
zero_on_empty: bool,
|
||||
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
||||
tokenizer_info = context.models.load(clip_field.tokenizer)
|
||||
text_encoder_info = context.models.load(clip_field.text_encoder)
|
||||
|
||||
# return zero on empty
|
||||
if prompt == "" and zero_on_empty:
|
||||
cpu_text_encoder = text_encoder_info.model
|
||||
@@ -168,11 +161,11 @@ class SDXLPromptInvocationBase:
|
||||
c_pooled = None
|
||||
return c, c_pooled
|
||||
|
||||
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
|
||||
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
|
||||
for lora in clip_field.loras:
|
||||
lora_info = context.models.load(lora.lora)
|
||||
lora_model = lora_info.model
|
||||
assert isinstance(lora_model, ModelPatchRaw)
|
||||
assert isinstance(lora_model, LoRAModelRaw)
|
||||
yield (lora_model, lora.weight)
|
||||
del lora_info
|
||||
return
|
||||
@@ -184,12 +177,11 @@ class SDXLPromptInvocationBase:
|
||||
with (
|
||||
# apply all patches while the model is on the target device
|
||||
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
|
||||
context.models.load(clip_field.tokenizer) as tokenizer,
|
||||
LayerPatcher.apply_smart_model_patches(
|
||||
model=text_encoder,
|
||||
tokenizer_info as tokenizer,
|
||||
LoRAPatcher.apply_lora_patches(
|
||||
text_encoder,
|
||||
patches=_lora_loader(),
|
||||
prefix=lora_prefix,
|
||||
dtype=text_encoder.dtype,
|
||||
cached_weights=cached_weights,
|
||||
),
|
||||
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
|
||||
@@ -199,7 +191,6 @@ class SDXLPromptInvocationBase:
|
||||
ti_manager,
|
||||
),
|
||||
):
|
||||
context.util.signal_progress("Building conditioning")
|
||||
assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
|
||||
assert isinstance(tokenizer, CLIPTokenizer)
|
||||
|
||||
@@ -212,8 +203,6 @@ class SDXLPromptInvocationBase:
|
||||
truncate_long_prompts=False, # TODO:
|
||||
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, # TODO: clip skip
|
||||
requires_pooled=get_pooled,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(prompt)
|
||||
@@ -229,11 +218,9 @@ class SDXLPromptInvocationBase:
|
||||
else:
|
||||
c_pooled = None
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del tokenizer_info
|
||||
del text_encoder_info
|
||||
|
||||
c = c.detach().to("cpu")
|
||||
@@ -245,10 +232,10 @@ class SDXLPromptInvocationBase:
|
||||
|
||||
@invocation(
|
||||
"sdxl_compel_prompt",
|
||||
title="Prompt - SDXL",
|
||||
title="SDXL Prompt",
|
||||
tags=["sdxl", "compel", "prompt"],
|
||||
category="conditioning",
|
||||
version="1.2.1",
|
||||
version="1.2.0",
|
||||
)
|
||||
class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
@@ -339,10 +326,10 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
|
||||
@invocation(
|
||||
"sdxl_refiner_compel_prompt",
|
||||
title="Prompt - SDXL Refiner",
|
||||
title="SDXL Refiner Prompt",
|
||||
tags=["sdxl", "compel", "prompt"],
|
||||
category="conditioning",
|
||||
version="1.1.2",
|
||||
version="1.1.1",
|
||||
)
|
||||
class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
@@ -388,10 +375,10 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):
|
||||
|
||||
@invocation(
|
||||
"clip_skip",
|
||||
title="Apply CLIP Skip - SD1.5, SDXL",
|
||||
title="CLIP Skip",
|
||||
tags=["clipskip", "clip", "skip"],
|
||||
category="conditioning",
|
||||
version="1.1.1",
|
||||
version="1.1.0",
|
||||
)
|
||||
class CLIPSkipInvocation(BaseInvocation):
|
||||
"""Skip layers in clip text_encoder model."""
|
||||
@@ -525,7 +512,7 @@ def log_tokenization_for_text(
|
||||
usedTokens += 1
|
||||
|
||||
if usedTokens > 0:
|
||||
print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
|
||||
print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
|
||||
print(f"{tokenized}\x1b[0m")
|
||||
|
||||
if discarded != "":
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,7 @@
|
||||
from typing import Literal
|
||||
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
LATENT_SCALE_FACTOR = 8
|
||||
"""
|
||||
HACK: Many nodes are currently hard-coded to use a fixed latent scale factor of 8. This is fragile, and will need to
|
||||
@@ -10,3 +12,5 @@ The ratio of image:latent dimensions is LATENT_SCALE_FACTOR:1, or 8:1.
|
||||
|
||||
IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
|
||||
"""A literal type for PIL image modes supported by Invoke"""
|
||||
|
||||
DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
# Invocations for ControlNet image preprocessors
|
||||
# initial implementation by Gregg Helt, 2023
|
||||
from typing import List, Union
|
||||
|
||||
from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
InputField,
|
||||
OutputField,
|
||||
)
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import (
|
||||
CONTROLNET_MODE_VALUES,
|
||||
CONTROLNET_RESIZE_VALUES,
|
||||
heuristic_resize_fast,
|
||||
)
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
|
||||
|
||||
|
||||
class ControlField(BaseModel):
|
||||
image: ImageField = Field(description="The control image")
|
||||
control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
|
||||
control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = Field(
|
||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||
)
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
|
||||
@invocation_output("control_output")
|
||||
class ControlOutput(BaseInvocationOutput):
|
||||
"""node output for ControlNet info"""
|
||||
|
||||
# Outputs
|
||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||
|
||||
|
||||
@invocation(
|
||||
"controlnet", title="ControlNet - SD1.5, SD2, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3"
|
||||
)
|
||||
class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.controlnet_model,
|
||||
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2, BaseModelType.StableDiffusionXL],
|
||||
ui_model_type=ModelType.ControlNet,
|
||||
)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = InputField(
|
||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||
)
|
||||
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ControlOutput:
|
||||
return ControlOutput(
|
||||
control=ControlField(
|
||||
image=self.image,
|
||||
control_model=self.control_model,
|
||||
control_weight=self.control_weight,
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
control_mode=self.control_mode,
|
||||
resize_mode=self.resize_mode,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"heuristic_resize",
|
||||
title="Heuristic Resize",
|
||||
tags=["image, controlnet"],
|
||||
category="image",
|
||||
version="1.1.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class HeuristicResizeInvocation(BaseInvocation):
|
||||
"""Resize an image using a heuristic method. Preserves edge maps."""
|
||||
|
||||
image: ImageField = InputField(description="The image to resize")
|
||||
width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
|
||||
height: int = InputField(default=512, ge=1, description="The height to resize to (px)")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name, "RGB")
|
||||
np_img = pil_to_np(image)
|
||||
np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
|
||||
resized = np_to_pil(np_resized)
|
||||
image_dto = context.images.save(image=resized)
|
||||
return ImageOutput.build(image_dto)
|
||||
716
invokeai/app/invocations/controlnet_image_processors.py
Normal file
716
invokeai/app/invocations/controlnet_image_processors.py
Normal file
@@ -0,0 +1,716 @@
|
||||
# Invocations for ControlNet image preprocessors
|
||||
# initial implementation by Gregg Helt, 2023
|
||||
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
|
||||
from builtins import bool, float
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Literal, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from controlnet_aux import (
|
||||
ContentShuffleDetector,
|
||||
LeresDetector,
|
||||
MediapipeFaceDetector,
|
||||
MidasDetector,
|
||||
MLSDdetector,
|
||||
NormalBaeDetector,
|
||||
PidiNetDetector,
|
||||
SamDetector,
|
||||
ZoeDetector,
|
||||
)
|
||||
from controlnet_aux.util import HWC3, ade_palette
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
from transformers import pipeline
|
||||
from transformers.pipelines import DepthEstimationPipeline
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
InputField,
|
||||
OutputField,
|
||||
UIType,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
|
||||
from invokeai.backend.image_util.canny import get_canny_edges
|
||||
from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
|
||||
from invokeai.backend.image_util.dw_openpose import DWPOSE_MODELS, DWOpenposeDetector
|
||||
from invokeai.backend.image_util.hed import HEDProcessor
|
||||
from invokeai.backend.image_util.lineart import LineartProcessor
|
||||
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
|
||||
class ControlField(BaseModel):
|
||||
image: ImageField = Field(description="The control image")
|
||||
control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
|
||||
control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = Field(
|
||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||
)
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
|
||||
@invocation_output("control_output")
|
||||
class ControlOutput(BaseInvocationOutput):
|
||||
"""node output for ControlNet info"""
|
||||
|
||||
# Outputs
|
||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||
|
||||
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
|
||||
class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
|
||||
)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = InputField(
|
||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||
)
|
||||
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ControlOutput:
|
||||
return ControlOutput(
|
||||
control=ControlField(
|
||||
image=self.image,
|
||||
control_model=self.control_model,
|
||||
control_weight=self.control_weight,
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
control_mode=self.control_mode,
|
||||
resize_mode=self.resize_mode,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
# This invocation exists for other invocations to subclass it - do not register with @invocation!
|
||||
class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Base class for invocations that preprocess images for ControlNet"""
|
||||
|
||||
image: ImageField = InputField(description="The image to process")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
# superclass just passes through image without processing
|
||||
return image
|
||||
|
||||
def load_image(self, context: InvocationContext) -> Image.Image:
|
||||
# allows override for any special formatting specific to the preprocessor
|
||||
return context.images.get_pil(self.image.image_name, "RGB")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
self._context = context
|
||||
raw_image = self.load_image(context)
|
||||
# image type should be PIL.PngImagePlugin.PngImageFile ?
|
||||
processed_image = self.run_processor(raw_image)
|
||||
|
||||
# currently can't see processed image in node UI without a showImage node,
|
||||
# so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery
|
||||
image_dto = context.images.save(image=processed_image)
|
||||
|
||||
"""Builds an ImageOutput and its ImageField"""
|
||||
processed_image_field = ImageField(image_name=image_dto.image_name)
|
||||
return ImageOutput(
|
||||
image=processed_image_field,
|
||||
# width=processed_image.width,
|
||||
width=image_dto.width,
|
||||
# height=processed_image.height,
|
||||
height=image_dto.height,
|
||||
# mode=processed_image.mode,
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"canny_image_processor",
|
||||
title="Canny Processor",
|
||||
tags=["controlnet", "canny"],
|
||||
category="controlnet",
|
||||
version="1.3.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Canny edge detection for ControlNet"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
low_threshold: int = InputField(
|
||||
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
|
||||
)
|
||||
high_threshold: int = InputField(
|
||||
default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)"
|
||||
)
|
||||
|
||||
def load_image(self, context: InvocationContext) -> Image.Image:
|
||||
# Keep alpha channel for Canny processing to detect edges of transparent areas
|
||||
return context.images.get_pil(self.image.image_name, "RGBA")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processed_image = get_canny_edges(
|
||||
image,
|
||||
self.low_threshold,
|
||||
self.high_threshold,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"hed_image_processor",
|
||||
title="HED (softedge) Processor",
|
||||
tags=["controlnet", "hed", "softedge"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies HED edge detection to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# safe not supported in controlnet_aux v0.0.3
|
||||
# safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
hed_processor = HEDProcessor()
|
||||
processed_image = hed_processor.run(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
# safe not supported in controlnet_aux v0.0.3
|
||||
# safe=self.safe,
|
||||
scribble=self.scribble,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"lineart_image_processor",
|
||||
title="Lineart Processor",
|
||||
tags=["controlnet", "lineart"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
lineart_processor = LineartProcessor()
|
||||
processed_image = lineart_processor.run(
|
||||
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution, coarse=self.coarse
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"lineart_anime_image_processor",
|
||||
title="Lineart Anime Processor",
|
||||
tags=["controlnet", "lineart", "anime"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art anime processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processor = LineartAnimeProcessor()
|
||||
processed_image = processor.run(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"midas_depth_image_processor",
|
||||
title="Midas Depth Processor",
|
||||
tags=["controlnet", "midas"],
|
||||
category="controlnet",
|
||||
version="1.2.4",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Midas depth processing to image"""
|
||||
|
||||
a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
|
||||
bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# depth_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
# TODO: replace from_pretrained() calls with context.models.download_and_cache() (or similar)
|
||||
midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = midas_processor(
|
||||
image,
|
||||
a=np.pi * self.a_mult,
|
||||
bg_th=self.bg_th,
|
||||
image_resolution=self.image_resolution,
|
||||
detect_resolution=self.detect_resolution,
|
||||
# dept_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal=self.depth_and_normal,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"normalbae_image_processor",
|
||||
title="Normal BAE Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies NormalBae processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = normalbae_processor(
|
||||
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"mlsd_image_processor",
|
||||
title="MLSD Processor",
|
||||
tags=["controlnet", "mlsd"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies MLSD processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
|
||||
thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = mlsd_processor(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
thr_v=self.thr_v,
|
||||
thr_d=self.thr_d,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"pidi_image_processor",
|
||||
title="PIDI Processor",
|
||||
tags=["controlnet", "pidi"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies PIDI processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = pidi_processor(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
safe=self.safe,
|
||||
scribble=self.scribble,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"content_shuffle_image_processor",
|
||||
title="Content Shuffle Processor",
|
||||
tags=["controlnet", "contentshuffle"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies content shuffle processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
|
||||
w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
|
||||
f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
content_shuffle_processor = ContentShuffleDetector()
|
||||
processed_image = content_shuffle_processor(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
h=self.h,
|
||||
w=self.w,
|
||||
f=self.f,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
# should work with controlnet_aux >= 0.0.4 and timm <= 0.6.13
|
||||
@invocation(
|
||||
"zoe_depth_image_processor",
|
||||
title="Zoe (Depth) Processor",
|
||||
tags=["controlnet", "zoe", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Zoe depth processing to image"""
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = zoe_depth_processor(image)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"mediapipe_face_processor",
|
||||
title="Mediapipe Face Processor",
|
||||
tags=["controlnet", "mediapipe", "face"],
|
||||
category="controlnet",
|
||||
version="1.2.4",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies mediapipe face processing to image"""
|
||||
|
||||
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
|
||||
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
mediapipe_face_processor = MediapipeFaceDetector()
|
||||
processed_image = mediapipe_face_processor(
|
||||
image,
|
||||
max_faces=self.max_faces,
|
||||
min_confidence=self.min_confidence,
|
||||
image_resolution=self.image_resolution,
|
||||
detect_resolution=self.detect_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"leres_image_processor",
|
||||
title="Leres (Depth) Processor",
|
||||
tags=["controlnet", "leres", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies leres processing to image"""
|
||||
|
||||
thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
|
||||
thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
|
||||
boost: bool = InputField(default=False, description="Whether to use boost mode")
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = leres_processor(
|
||||
image,
|
||||
thr_a=self.thr_a,
|
||||
thr_b=self.thr_b,
|
||||
boost=self.boost,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"tile_image_processor",
|
||||
title="Tile Resample Processor",
|
||||
tags=["controlnet", "tile"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Tile resampler processor"""
|
||||
|
||||
# res: int = InputField(default=512, ge=0, le=1024, description="The pixel resolution for each tile")
|
||||
down_sampling_rate: float = InputField(default=1.0, ge=1.0, le=8.0, description="Down sampling rate")
|
||||
|
||||
# tile_resample copied from sd-webui-controlnet/scripts/processor.py
|
||||
def tile_resample(
|
||||
self,
|
||||
np_img: np.ndarray,
|
||||
res=512, # never used?
|
||||
down_sampling_rate=1.0,
|
||||
):
|
||||
np_img = HWC3(np_img)
|
||||
if down_sampling_rate < 1.1:
|
||||
return np_img
|
||||
H, W, C = np_img.shape
|
||||
H = int(float(H) / float(down_sampling_rate))
|
||||
W = int(float(W) / float(down_sampling_rate))
|
||||
np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
|
||||
return np_img
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
np_img = np.array(image, dtype=np.uint8)
|
||||
processed_np_image = self.tile_resample(
|
||||
np_img,
|
||||
# res=self.tile_size,
|
||||
down_sampling_rate=self.down_sampling_rate,
|
||||
)
|
||||
processed_image = Image.fromarray(processed_np_image)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"segment_anything_processor",
|
||||
title="Segment Anything Processor",
|
||||
tags=["controlnet", "segmentanything"],
|
||||
category="controlnet",
|
||||
version="1.2.4",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies segment anything processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
||||
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
|
||||
"ybelkada/segment-anything", subfolder="checkpoints"
|
||||
)
|
||||
np_img = np.array(image, dtype=np.uint8)
|
||||
processed_image = segment_anything_processor(
|
||||
np_img, image_resolution=self.image_resolution, detect_resolution=self.detect_resolution
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
class SamDetectorReproducibleColors(SamDetector):
|
||||
# overriding SamDetector.show_anns() method to use reproducible colors for segmentation image
|
||||
# base class show_anns() method randomizes colors,
|
||||
# which seems to also lead to non-reproducible image generation
|
||||
# so using ADE20k color palette instead
|
||||
def show_anns(self, anns: List[Dict]):
|
||||
if len(anns) == 0:
|
||||
return
|
||||
sorted_anns = sorted(anns, key=(lambda x: x["area"]), reverse=True)
|
||||
h, w = anns[0]["segmentation"].shape
|
||||
final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")
|
||||
palette = ade_palette()
|
||||
for i, ann in enumerate(sorted_anns):
|
||||
m = ann["segmentation"]
|
||||
img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8)
|
||||
# doing modulo just in case number of annotated regions exceeds number of colors in palette
|
||||
ann_color = palette[i % len(palette)]
|
||||
img[:, :] = ann_color
|
||||
final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255)))
|
||||
return np.array(final_img, dtype=np.uint8)
|
||||
|
||||
|
||||
@invocation(
|
||||
"color_map_image_processor",
|
||||
title="Color Map Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a color map from the provided image"""
|
||||
|
||||
color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
np_image = np.array(image, dtype=np.uint8)
|
||||
height, width = np_image.shape[:2]
|
||||
|
||||
width_tile_size = min(self.color_map_tile_size, width)
|
||||
height_tile_size = min(self.color_map_tile_size, height)
|
||||
|
||||
color_map = cv2.resize(
|
||||
np_image,
|
||||
(width // width_tile_size, height // height_tile_size),
|
||||
interpolation=cv2.INTER_CUBIC,
|
||||
)
|
||||
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
|
||||
color_map = Image.fromarray(color_map)
|
||||
return color_map
|
||||
|
||||
|
||||
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small", "small_v2"]
|
||||
# DepthAnything V2 Small model is licensed under Apache 2.0 but not the base and large models.
|
||||
DEPTH_ANYTHING_MODELS = {
|
||||
"large": "LiheYoung/depth-anything-large-hf",
|
||||
"base": "LiheYoung/depth-anything-base-hf",
|
||||
"small": "LiheYoung/depth-anything-small-hf",
|
||||
"small_v2": "depth-anything/Depth-Anything-V2-Small-hf",
|
||||
}
|
||||
|
||||
|
||||
@invocation(
|
||||
"depth_anything_image_processor",
|
||||
title="Depth Anything Processor",
|
||||
tags=["controlnet", "depth", "depth anything"],
|
||||
category="controlnet",
|
||||
version="1.1.3",
|
||||
classification=Classification.Deprecated,
|
||||
)
|
||||
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||
|
||||
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
|
||||
default="small_v2", description="The size of the depth model to use"
|
||||
)
|
||||
resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
def load_depth_anything(model_path: Path):
|
||||
depth_anything_pipeline = pipeline(model=str(model_path), task="depth-estimation", local_files_only=True)
|
||||
assert isinstance(depth_anything_pipeline, DepthEstimationPipeline)
|
||||
return DepthAnythingPipeline(depth_anything_pipeline)
|
||||
|
||||
with self._context.models.load_remote_model(
|
||||
source=DEPTH_ANYTHING_MODELS[self.model_size], loader=load_depth_anything
|
||||
) as depth_anything_detector:
|
||||
assert isinstance(depth_anything_detector, DepthAnythingPipeline)
|
||||
depth_map = depth_anything_detector.generate_depth(image)
|
||||
|
||||
# Resizing to user target specified size
|
||||
new_height = int(image.size[1] * (self.resolution / image.size[0]))
|
||||
depth_map = depth_map.resize((self.resolution, new_height))
|
||||
|
||||
return depth_map
|
||||
|
||||
|
||||
@invocation(
    "dw_openpose_image_processor",
    title="DW Openpose Image Processor",
    tags=["controlnet", "dwpose", "openpose"],
    category="controlnet",
    version="1.1.1",
    classification=Classification.Deprecated,
)
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
    """Generates an openpose pose from an image using DWPose"""

    draw_body: bool = InputField(default=True)
    draw_face: bool = InputField(default=False)
    draw_hands: bool = InputField(default=False)
    image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)

    def run_processor(self, image: Image.Image) -> Image.Image:
        onnx_det = self._context.models.download_and_cache_model(DWPOSE_MODELS["yolox_l.onnx"])
        onnx_pose = self._context.models.download_and_cache_model(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"])

        dw_openpose = DWOpenposeDetector(onnx_det=onnx_det, onnx_pose=onnx_pose)
        processed_image = dw_openpose(
            image,
            draw_face=self.draw_face,
            draw_hands=self.draw_hands,
            draw_body=self.draw_body,
            resolution=self.image_resolution,
        )
        return processed_image


@invocation(
    "heuristic_resize",
    title="Heuristic Resize",
    tags=["image", "controlnet"],
    category="image",
    version="1.0.1",
    classification=Classification.Prototype,
)
class HeuristicResizeInvocation(BaseInvocation):
    """Resize an image using a heuristic method. Preserves edge maps."""

    image: ImageField = InputField(description="The image to resize")
    width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
    height: int = InputField(default=512, ge=1, description="The height to resize to (px)")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, "RGB")
        np_img = pil_to_np(image)
        np_resized = heuristic_resize(np_img, (self.width, self.height))
        resized = np_to_pil(np_resized)
        image_dto = context.images.save(image=resized)
        return ImageOutput.build(image_dto)
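The heuristic itself is not spelled out in the code above. As one plausible illustration (not necessarily what InvokeAI's heuristic_resize does), an edge-map-aware resize can switch to nearest-neighbor when the input looks binary, so Canny/HED-style maps are not blurred into grey; the function name here is hypothetical:

import cv2
import numpy as np


def naive_heuristic_resize(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
    # Treat images with only a couple of distinct values (e.g. 0 and 255) as edge maps
    # and resize them without interpolation; use area averaging for everything else.
    looks_like_edge_map = np.unique(np_img).size <= 2
    interpolation = cv2.INTER_NEAREST if looks_like_edge_map else cv2.INTER_AREA
    return cv2.resize(np_img, size, interpolation=interpolation)
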
@@ -6,6 +6,7 @@ from PIL import Image
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import DEFAULT_PRECISION
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
from invokeai.app.invocations.model import VAEField
@@ -28,7 +29,11 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
    image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
    mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=4)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=4,
    )

    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
        if mask_image.mode != "L":
@@ -60,7 +65,6 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
            img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
            masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
            # TODO:
            context.util.signal_progress("Running VAE encoder")
            masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())

            masked_latents_name = context.tensors.save(tensor=masked_latents)

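In the hunk above, the fp32 field's default is tied to the app-wide precision instead of being hard-coded. A hedged standalone illustration of how that default evaluates (the assigned DEFAULT_PRECISION value below is assumed for the example, not taken from the diff):

import torch

DEFAULT_PRECISION = torch.float16  # assumed value for illustration
fp32_default = DEFAULT_PRECISION == torch.float32
print(fp32_default)  # False -> the mask latents are encoded at half precision by default
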
Some files were not shown because too many files have changed in this diff.