Compare commits
1 Commits
main
...
psyche/fix
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
922f8e3749 |
@@ -1,11 +1,9 @@
|
||||
*
|
||||
!invokeai
|
||||
!pyproject.toml
|
||||
!uv.lock
|
||||
!docker/docker-entrypoint.sh
|
||||
!LICENSE
|
||||
|
||||
**/dist
|
||||
**/node_modules
|
||||
**/__pycache__
|
||||
**/*.egg-info
|
||||
**/*.egg-info
|
||||
@@ -1,5 +1,2 @@
|
||||
b3dccfaeb636599c02effc377cdd8a87d658256c
|
||||
218b6d0546b990fc449c876fb99f44b50c4daa35
|
||||
182580ff6970caed400be178c5b888514b75d7f2
|
||||
8e9d5c1187b0d36da80571ce4c8ba9b3a37b6c46
|
||||
99aac5870e1092b182e6c5f21abcaab6936a4ad1
|
||||
4
.gitattributes
vendored
@@ -2,6 +2,4 @@
|
||||
# Only affects text files and ignores other file types.
|
||||
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
|
||||
* text=auto
|
||||
docker/** text eol=lf
|
||||
tests/test_model_probe/stripped_models/** filter=lfs diff=lfs merge=lfs -text
|
||||
tests/model_identification/stripped_models/** filter=lfs diff=lfs merge=lfs -text
|
||||
docker/** text eol=lf
|
||||
40
.github/CODEOWNERS
vendored
@@ -1,32 +1,32 @@
|
||||
# continuous integration
|
||||
/.github/workflows/ @lstein @blessedcoolant
|
||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||
|
||||
# documentation - anyone with write privileges can review
|
||||
/docs/
|
||||
/mkdocs.yml
|
||||
# documentation
|
||||
/docs/ @lstein @blessedcoolant @hipsterusername @Millu
|
||||
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu
|
||||
|
||||
# nodes
|
||||
/invokeai/app/ @blessedcoolant @lstein @dunkeroni @JPPhoto
|
||||
/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername
|
||||
|
||||
# installation and configuration
|
||||
/pyproject.toml @lstein @blessedcoolant
|
||||
/docker/ @lstein @blessedcoolant
|
||||
/scripts/ @lstein @blessedcoolant
|
||||
/installer/ @lstein @blessedcoolant
|
||||
/invokeai/assets @lstein @blessedcoolant
|
||||
/invokeai/configs @lstein @blessedcoolant
|
||||
/invokeai/version @lstein @blessedcoolant
|
||||
/pyproject.toml @lstein @blessedcoolant @hipsterusername
|
||||
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||
/scripts/ @ebr @lstein @hipsterusername
|
||||
/installer/ @lstein @ebr @hipsterusername
|
||||
/invokeai/assets @lstein @ebr @hipsterusername
|
||||
/invokeai/configs @lstein @hipsterusername
|
||||
/invokeai/version @lstein @blessedcoolant @hipsterusername
|
||||
|
||||
# web ui
|
||||
/invokeai/frontend @blessedcoolant @lstein @dunkeroni
|
||||
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
|
||||
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
|
||||
|
||||
# generation, model management, postprocessing
|
||||
/invokeai/backend @lstein @blessedcoolant @dunkeroni @JPPhoto @Pfannkuchensack
|
||||
/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername
|
||||
|
||||
# front ends
|
||||
/invokeai/frontend/CLI @lstein
|
||||
/invokeai/frontend/install @lstein
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant
|
||||
/invokeai/frontend/training @lstein @blessedcoolant
|
||||
/invokeai/frontend/web @blessedcoolant @lstein @dunkeroni @Pfannkuchensack
|
||||
|
||||
/invokeai/frontend/CLI @lstein @hipsterusername
|
||||
/invokeai/frontend/install @lstein @ebr @hipsterusername
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
|
||||
|
||||
26
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@@ -21,20 +21,6 @@ body:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: install_method
|
||||
attributes:
|
||||
label: Install method
|
||||
description: How did you install Invoke?
|
||||
multiple: false
|
||||
options:
|
||||
- "Invoke's Launcher"
|
||||
- 'Stability Matrix'
|
||||
- 'Pinokio'
|
||||
- 'Manual'
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: __Describe your environment__
|
||||
@@ -90,8 +76,8 @@ body:
|
||||
attributes:
|
||||
label: Version number
|
||||
description: |
|
||||
The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||
placeholder: ex. v6.0.2
|
||||
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||
placeholder: ex. 3.6.1
|
||||
validations:
|
||||
required: true
|
||||
|
||||
@@ -99,17 +85,17 @@ body:
|
||||
id: browser-version
|
||||
attributes:
|
||||
label: Browser
|
||||
description: Your web browser and version, if you do not use the Launcher's provided GUI.
|
||||
description: Your web browser and version.
|
||||
placeholder: ex. Firefox 123.0b3
|
||||
validations:
|
||||
required: false
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: python-deps
|
||||
attributes:
|
||||
label: System Information
|
||||
label: Python dependencies
|
||||
description: |
|
||||
Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
|
||||
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
|
||||
validations:
|
||||
required: false
|
||||
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,7 +1,7 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Project-Documentation
|
||||
url: https://invoke.ai/
|
||||
url: https://invoke-ai.github.io/InvokeAI/
|
||||
about: Should be your first place to go when looking for manuals/FAQs regarding our InvokeAI Toolkit
|
||||
- name: Discord
|
||||
url: https://discord.gg/ZmtBAhwWhy
|
||||
|
||||
@@ -3,15 +3,15 @@ description: Installs frontend dependencies with pnpm, with caching
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- name: setup node 20
|
||||
- name: setup node 18
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
node-version: '18'
|
||||
|
||||
- name: setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
version: 8.15.6
|
||||
run_install: false
|
||||
|
||||
- name: get pnpm store directory
|
||||
|
||||
1
.github/pull_request_template.md
vendored
@@ -18,6 +18,5 @@
|
||||
|
||||
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
|
||||
- [ ] _Tests added / updated (if applicable)_
|
||||
- [ ] _❗Changes to a redux slice have a corresponding migration_
|
||||
- [ ] _Documentation added / updated (if applicable)_
|
||||
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
|
||||
|
||||
27
.github/workflows/build-container.yml
vendored
@@ -45,23 +45,13 @@ jobs:
|
||||
steps:
|
||||
- name: Free up more disk space on the runner
|
||||
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
||||
# the /mnt dir has 70GBs of free space
|
||||
# /dev/sda1 74G 28K 70G 1% /mnt
|
||||
# According to some online posts the /mnt is not always there, so checking before setting docker to use it
|
||||
run: |
|
||||
echo "----- Free space before cleanup"
|
||||
df -h
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
if [ -f /mnt/swapfile ]; then
|
||||
sudo swapoff /mnt/swapfile
|
||||
sudo rm -rf /mnt/swapfile
|
||||
fi
|
||||
if [ -d /mnt ]; then
|
||||
sudo chmod -R 777 /mnt
|
||||
echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
|
||||
sudo systemctl restart docker
|
||||
fi
|
||||
sudo swapoff /mnt/swapfile
|
||||
sudo rm -rf /mnt/swapfile
|
||||
echo "----- Free space after cleanup"
|
||||
df -h
|
||||
|
||||
@@ -86,6 +76,9 @@ jobs:
|
||||
latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
|
||||
suffix=-${{ matrix.gpu-driver }},onlatest=false
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
@@ -107,12 +100,10 @@ jobs:
|
||||
context: .
|
||||
file: docker/Dockerfile
|
||||
platforms: ${{ env.PLATFORMS }}
|
||||
build-args: |
|
||||
GPU_DRIVER=${{ matrix.gpu-driver }}
|
||||
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
# cache-from: |
|
||||
# type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
|
||||
# type=gha,scope=main-${{ matrix.gpu-driver }}
|
||||
# cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
|
||||
cache-from: |
|
||||
type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
|
||||
type=gha,scope=main-${{ matrix.gpu-driver }}
|
||||
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Builds and uploads python build artifacts.
|
||||
# Builds and uploads the installer and python build artifacts.
|
||||
|
||||
name: build wheel
|
||||
name: build installer
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@@ -17,7 +17,7 @@ jobs:
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.12'
|
||||
python-version: '3.10'
|
||||
cache: pip
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
@@ -27,12 +27,19 @@ jobs:
|
||||
- name: setup frontend
|
||||
uses: ./.github/actions/install-frontend-deps
|
||||
|
||||
- name: build wheel
|
||||
id: build_wheel
|
||||
run: ./scripts/build_wheel.sh
|
||||
- name: create installer
|
||||
id: create_installer
|
||||
run: ./create_installer.sh
|
||||
working-directory: installer
|
||||
|
||||
- name: upload python distribution artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: dist
|
||||
path: ${{ steps.build_wheel.outputs.DIST_PATH }}
|
||||
path: ${{ steps.create_installer.outputs.DIST_PATH }}
|
||||
|
||||
- name: upload installer artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: installer
|
||||
path: ${{ steps.create_installer.outputs.INSTALLER_PATH }}
|
||||
1
.github/workflows/close-inactive-issues.yml
vendored
@@ -23,7 +23,6 @@ jobs:
|
||||
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
only-labels: "bug"
|
||||
exempt-issue-labels: "Active Issue"
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
operations-per-run: 500
|
||||
|
||||
141
.github/workflows/deploy-docs.yml
vendored
@@ -1,141 +0,0 @@
|
||||
name: 'docs'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
types:
|
||||
- 'ready_for_review'
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
deploy_target:
|
||||
description: 'Deploy target (custom = invoke.ai, ghpages = invoke-ai.github.io/InvokeAI)'
|
||||
type: choice
|
||||
options:
|
||||
- custom
|
||||
- ghpages
|
||||
default: custom
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
changes:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
docs: ${{ steps.manual.outputs.docs || steps.filter.outputs.docs }}
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: mark manual run
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
id: manual
|
||||
run: echo "docs=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: detect docs-related changes
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
id: filter
|
||||
uses: dorny/paths-filter@v3
|
||||
with:
|
||||
filters: |
|
||||
docs:
|
||||
- '.github/workflows/deploy-docs.yml'
|
||||
- 'docs/**'
|
||||
- 'scripts/generate_docs_json.py'
|
||||
- 'invokeai/app/**'
|
||||
- 'invokeai/backend/**'
|
||||
- 'pyproject.toml'
|
||||
- 'uv.lock'
|
||||
|
||||
check-and-build:
|
||||
needs: changes
|
||||
if: |
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'pull_request' &&
|
||||
github.event.pull_request.draft == false &&
|
||||
needs.changes.outputs.docs == 'true') ||
|
||||
(github.event_name == 'push' && needs.changes.outputs.docs == 'true')
|
||||
runs-on: ubuntu-22.04
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Python (needed for generate-docs-data)
|
||||
- name: setup uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
python-version: '3.11'
|
||||
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
# generate_docs_json.py only needs the invokeai package importable
|
||||
# (pydantic + invokeai.app/backend). Skip the [test] extra to keep CI fast.
|
||||
- name: install python dependencies
|
||||
run: uv pip install --editable .
|
||||
|
||||
# Node (needed for docs build)
|
||||
- name: setup node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22.12.0'
|
||||
|
||||
- name: setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 10
|
||||
run_install: false
|
||||
|
||||
- name: install docs dependencies
|
||||
run: pnpm install --prefer-frozen-lockfile
|
||||
working-directory: docs
|
||||
|
||||
# Checks
|
||||
- name: verify generated docs data
|
||||
run: pnpm run check-docs-data
|
||||
working-directory: docs
|
||||
|
||||
- name: build docs
|
||||
run: pnpm build
|
||||
working-directory: docs
|
||||
env:
|
||||
DEPLOY_TARGET: ${{ github.event_name == 'workflow_dispatch' && inputs.deploy_target || github.ref == 'refs/heads/main' && 'ghpages' || 'custom' }}
|
||||
|
||||
# Upload artifact for deploy (main branch only)
|
||||
- name: upload pages artifact
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: docs/dist
|
||||
|
||||
deploy:
|
||||
if: github.ref == 'refs/heads/main'
|
||||
needs: check-and-build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
steps:
|
||||
- name: deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
7
.github/workflows/frontend-checks.yml
vendored
@@ -44,12 +44,7 @@ jobs:
|
||||
- name: check for changed frontend files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
files_yaml: |
|
||||
frontend:
|
||||
|
||||
7
.github/workflows/frontend-tests.yml
vendored
@@ -44,12 +44,7 @@ jobs:
|
||||
- name: check for changed frontend files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
files_yaml: |
|
||||
frontend:
|
||||
|
||||
30
.github/workflows/lfs-checks.yml
vendored
@@ -1,30 +0,0 @@
|
||||
# Checks that large files and LFS-tracked files are properly checked in with pointer format.
|
||||
# Uses https://github.com/ppremk/lfs-warning to detect LFS issues.
|
||||
|
||||
name: 'lfs checks'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
types:
|
||||
- 'ready_for_review'
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
lfs-check:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
permissions:
|
||||
# Required to label and comment on the PRs
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: check lfs files
|
||||
uses: ppremk/lfs-warning@v3.3
|
||||
49
.github/workflows/mkdocs-material.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
# This is a mostly a copy-paste from https://github.com/squidfunk/mkdocs-material/blob/master/docs/publishing-your-site.md
|
||||
|
||||
name: mkdocs
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
if: github.event.pull_request.draft == false
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
|
||||
REPO_NAME: '${{ github.repository }}'
|
||||
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
|
||||
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: setup python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
cache: pip
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: set cache id
|
||||
run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
|
||||
|
||||
- name: use cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
key: mkdocs-material-${{ env.cache_id }}
|
||||
path: .cache
|
||||
restore-keys: |
|
||||
mkdocs-material-
|
||||
|
||||
- name: install dependencies
|
||||
run: python -m pip install ".[docs]"
|
||||
|
||||
- name: build & deploy
|
||||
run: mkdocs gh-deploy --force
|
||||
28
.github/workflows/python-checks.yml
vendored
@@ -34,9 +34,6 @@ on:
|
||||
|
||||
jobs:
|
||||
python-checks:
|
||||
env:
|
||||
# uv requires a venv by default - but for this, we can simply use the system python
|
||||
UV_SYSTEM_PYTHON: 1
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5 # expected run time: <1 min
|
||||
steps:
|
||||
@@ -46,12 +43,7 @@ jobs:
|
||||
- name: check for changed python files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
files_yaml: |
|
||||
python:
|
||||
@@ -60,23 +52,25 @@ jobs:
|
||||
- '!invokeai/frontend/web/**'
|
||||
- 'tests/**'
|
||||
|
||||
- name: setup uv
|
||||
- name: setup python
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: astral-sh/setup-uv@v5
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
python-version: '3.10'
|
||||
cache: pip
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: check pypi classifiers
|
||||
- name: install ruff
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
|
||||
run: pip install ruff==0.6.0
|
||||
shell: bash
|
||||
|
||||
- name: ruff check
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv tool run ruff@0.11.2 check --output-format=github .
|
||||
run: ruff check --output-format=github .
|
||||
shell: bash
|
||||
|
||||
- name: ruff format
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv tool run ruff@0.11.2 format --check .
|
||||
run: ruff format --check .
|
||||
shell: bash
|
||||
|
||||
40
.github/workflows/python-tests.yml
vendored
@@ -39,15 +39,24 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
python-version:
|
||||
- '3.10'
|
||||
- '3.11'
|
||||
- '3.12'
|
||||
platform:
|
||||
- linux-cuda-11_7
|
||||
- linux-rocm-5_2
|
||||
- linux-cpu
|
||||
- macos-default
|
||||
- windows-cpu
|
||||
include:
|
||||
- platform: linux-cuda-11_7
|
||||
os: ubuntu-22.04
|
||||
github-env: $GITHUB_ENV
|
||||
- platform: linux-rocm-5_2
|
||||
os: ubuntu-22.04
|
||||
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||
github-env: $GITHUB_ENV
|
||||
- platform: linux-cpu
|
||||
os: ubuntu-24.04
|
||||
os: ubuntu-22.04
|
||||
extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
||||
github-env: $GITHUB_ENV
|
||||
- platform: macos-default
|
||||
@@ -61,22 +70,14 @@ jobs:
|
||||
timeout-minutes: 15 # expected run time: 2-6 min, depending on platform
|
||||
env:
|
||||
PIP_USE_PEP517: '1'
|
||||
UV_SYSTEM_PYTHON: 1
|
||||
|
||||
steps:
|
||||
- name: checkout
|
||||
# https://github.com/nschloe/action-cached-lfs-checkout
|
||||
uses: nschloe/action-cached-lfs-checkout@f46300cd8952454b9f0a21a3d133d4bd5684cfc2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed python files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
files_yaml: |
|
||||
python:
|
||||
@@ -85,25 +86,20 @@ jobs:
|
||||
- '!invokeai/frontend/web/**'
|
||||
- 'tests/**'
|
||||
|
||||
- name: setup uv
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: setup python
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
cache: pip
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: install dependencies
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
env:
|
||||
UV_INDEX: ${{ matrix.extra-index-url }}
|
||||
run: uv pip install --editable ".[test]"
|
||||
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
|
||||
run: >
|
||||
pip3 install --editable=".[test]"
|
||||
|
||||
- name: run pytest
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
|
||||
2
.github/workflows/release.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
always_run: true
|
||||
|
||||
build:
|
||||
uses: ./.github/workflows/build-wheel.yml
|
||||
uses: ./.github/workflows/build-installer.yml
|
||||
|
||||
publish-testpypi:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
78
.github/workflows/translations.yml
vendored
@@ -1,78 +0,0 @@
|
||||
name: Crowdin Translations
|
||||
|
||||
on:
|
||||
# Allow manual runs from the Actions tab
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
upload_sources:
|
||||
description: 'Upload source strings to Crowdin'
|
||||
type: boolean
|
||||
default: true
|
||||
download_translations:
|
||||
description: 'Download translations from Crowdin'
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
# Upload sources & download translations when source files change on main
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'invokeai/frontend/web/public/locales/en.json'
|
||||
- 'docs/src/content/i18n/en.json'
|
||||
- 'docs/src/content/docs/**/*.md'
|
||||
- 'docs/src/content/docs/**/*.mdx'
|
||||
- '!docs/src/content/docs/[a-z][a-z]/**'
|
||||
- '!docs/src/content/docs/[a-z][a-z]-*/**'
|
||||
- 'crowdin.yml'
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
crowdin-sync:
|
||||
name: Sync with Crowdin
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Crowdin Sync
|
||||
uses: crowdin/github-action@v2
|
||||
with:
|
||||
# Upload sources on push to main or when manually requested
|
||||
upload_sources: ${{ github.event_name != 'workflow_dispatch' || inputs.upload_sources }}
|
||||
upload_translations: false
|
||||
|
||||
# Download translations on push to main or when manually requested
|
||||
download_translations: ${{ github.event_name != 'workflow_dispatch' || inputs.download_translations }}
|
||||
|
||||
# PR settings for downloaded translations
|
||||
create_pull_request: true
|
||||
pull_request_title: 'i18n: update translations from Crowdin'
|
||||
pull_request_body: |
|
||||
Automated pull request from [Crowdin](https://crowdin.com).
|
||||
|
||||
This PR updates translations for:
|
||||
- **Web App UI** (`invokeai/frontend/web/public/locales/`)
|
||||
- **Documentation UI Strings** (`docs/src/content/i18n/`)
|
||||
- **Documentation Content** (`docs/src/content/docs/<locale>/`)
|
||||
pull_request_base_branch_name: main
|
||||
pull_request_labels: 'i18n'
|
||||
|
||||
# Commit settings
|
||||
localization_branch_name: crowdin/translations
|
||||
commit_message: 'i18n: update translations from Crowdin'
|
||||
|
||||
# Use the config file at the repo root
|
||||
config: crowdin.yml
|
||||
|
||||
# Skip untranslated strings/files to keep partial translations clean
|
||||
download_translations_args: '--skip-untranslated-strings'
|
||||
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
CROWDIN_PROJECT_ID: ${{ secrets.CROWDIN_PROJECT_ID }}
|
||||
CROWDIN_PERSONAL_TOKEN: ${{ secrets.CROWDIN_PERSONAL_TOKEN }}
|
||||
112
.github/workflows/typegen-checks.yml
vendored
@@ -1,112 +0,0 @@
|
||||
# Runs typegen schema quality checks.
|
||||
# Frontend types should match the server.
|
||||
#
|
||||
# Checks for changes to files before running the checks.
|
||||
# If always_run is true, always runs the checks.
|
||||
|
||||
name: 'typegen checks'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
types:
|
||||
- 'ready_for_review'
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
typegen-checks:
|
||||
runs-on: ubuntu-22.04
|
||||
timeout-minutes: 15 # expected run time: <5 min
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Free up more disk space on the runner
|
||||
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
||||
run: |
|
||||
echo "----- Free space before cleanup"
|
||||
df -h
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
if [ -f /mnt/swapfile ]; then
|
||||
sudo swapoff /mnt/swapfile
|
||||
sudo rm -rf /mnt/swapfile
|
||||
fi
|
||||
echo "----- Free space after cleanup"
|
||||
df -h
|
||||
|
||||
- name: check for changed files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
with:
|
||||
files_yaml: |
|
||||
src:
|
||||
- 'pyproject.toml'
|
||||
- 'invokeai/**'
|
||||
|
||||
- name: setup uv
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
python-version: '3.11'
|
||||
|
||||
- name: setup python
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: install dependencies
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
env:
|
||||
UV_INDEX: ${{ matrix.extra-index-url }}
|
||||
run: uv pip install --editable .
|
||||
|
||||
- name: install frontend dependencies
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: ./.github/actions/install-frontend-deps
|
||||
|
||||
- name: copy schema
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: cp invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts
|
||||
shell: bash
|
||||
|
||||
- name: generate schema
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: cd invokeai/frontend/web && uv run ../../../scripts/generate_openapi_schema.py | pnpm typegen
|
||||
shell: bash
|
||||
|
||||
- name: compare files
|
||||
if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: |
|
||||
if ! diff invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts; then
|
||||
echo "Files are different!";
|
||||
exit 1;
|
||||
fi
|
||||
shell: bash
|
||||
68
.github/workflows/uv-lock-checks.yml
vendored
@@ -1,68 +0,0 @@
|
||||
# Check the `uv` lockfile for consistency with `pyproject.toml`.
|
||||
#
|
||||
# If this check fails, you should run `uv lock` to update the lockfile.
|
||||
|
||||
name: 'uv lock checks'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
types:
|
||||
- 'ready_for_review'
|
||||
- 'opened'
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
uv-lock-checks:
|
||||
env:
|
||||
# uv requires a venv by default - but for this, we can simply use the system python
|
||||
UV_SYSTEM_PYTHON: 1
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5 # expected run time: <1 min
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed python files
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
|
||||
# See:
|
||||
# - CVE-2025-30066
|
||||
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
|
||||
# - https://github.com/tj-actions/changed-files/issues/2463
|
||||
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
|
||||
with:
|
||||
files_yaml: |
|
||||
uvlock-pyprojecttoml:
|
||||
- 'pyproject.toml'
|
||||
- 'uv.lock'
|
||||
|
||||
- name: setup uv
|
||||
if: ${{ steps.changed-files.outputs.uvlock-pyprojecttoml_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
|
||||
- name: check lockfile
|
||||
if: ${{ steps.changed-files.outputs.uvlock-pyprojecttoml_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv lock --locked # this will exit with 1 if the lockfile is not consistent with pyproject.toml
|
||||
shell: bash
|
||||
8
.gitignore
vendored
@@ -179,9 +179,7 @@ cython_debug/
|
||||
|
||||
# Scratch folder
|
||||
.scratch/
|
||||
worktrees/
|
||||
.vscode/
|
||||
.zed/
|
||||
|
||||
# source installer files
|
||||
installer/*zip
|
||||
@@ -190,9 +188,3 @@ installer/install.sh
|
||||
installer/update.bat
|
||||
installer/update.sh
|
||||
installer/InvokeAI-Installer/
|
||||
.aider*
|
||||
|
||||
.claude/
|
||||
|
||||
# Weblate configuration file
|
||||
weblate.ini
|
||||
@@ -4,29 +4,21 @@ repos:
|
||||
hooks:
|
||||
- id: black
|
||||
name: black
|
||||
stages: [pre-commit]
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: black
|
||||
types: [python]
|
||||
|
||||
- id: flake8
|
||||
name: flake8
|
||||
stages: [pre-commit]
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: flake8
|
||||
types: [python]
|
||||
|
||||
- id: isort
|
||||
name: isort
|
||||
stages: [pre-commit]
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: isort
|
||||
types: [python]
|
||||
|
||||
- id: uvlock
|
||||
name: uv lock
|
||||
stages: [pre-commit]
|
||||
language: system
|
||||
entry: uv lock
|
||||
files: ^pyproject\.toml$
|
||||
pass_filenames: false
|
||||
types: [python]
|
||||
34
Makefile
@@ -12,25 +12,24 @@ help:
|
||||
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
|
||||
@echo "test Run the unit tests."
|
||||
@echo "update-config-docstring Update the app's config docstring so mkdocs can autogenerate it correctly."
|
||||
@echo "frontend-install Install the pnpm modules needed for the frontend"
|
||||
@echo "frontend-build Build the frontend for localhost:9090"
|
||||
@echo "frontend-test Run the frontend test suite once"
|
||||
@echo "frontend-install Install the pnpm modules needed for the front end"
|
||||
@echo "frontend-build Build the frontend in order to run on localhost:9090"
|
||||
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
|
||||
@echo "frontend-typegen Generate types for the frontend from the OpenAPI schema"
|
||||
@echo "frontend-lint Run frontend checks and fixable lint/format steps"
|
||||
@echo "wheel Build the wheel for the current version"
|
||||
@echo "installer-zip Build the installer .zip file for the current version"
|
||||
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
|
||||
@echo "openapi Generate the OpenAPI schema for the app, outputting to stdout"
|
||||
@echo "docs Serve the mkdocs site with live reload"
|
||||
|
||||
# Runs ruff, fixing any safely-fixable errors and formatting
|
||||
ruff:
|
||||
cd invokeai && uv tool run ruff@0.11.2 format
|
||||
ruff check . --fix
|
||||
ruff format .
|
||||
|
||||
# Runs ruff, fixing all errors it can fix and formatting
|
||||
ruff-unsafe:
|
||||
ruff check . --fix --unsafe-fixes
|
||||
ruff format
|
||||
ruff format .
|
||||
|
||||
# Runs mypy, using the config in pyproject.toml
|
||||
mypy:
|
||||
@@ -58,10 +57,6 @@ frontend-install:
|
||||
frontend-build:
|
||||
cd invokeai/frontend/web && pnpm build
|
||||
|
||||
# Run the frontend test suite once
|
||||
frontend-test:
|
||||
cd invokeai/frontend/web && pnpm run test:run
|
||||
|
||||
# Run the frontend in dev mode
|
||||
frontend-dev:
|
||||
cd invokeai/frontend/web && pnpm dev
|
||||
@@ -69,20 +64,13 @@ frontend-dev:
|
||||
frontend-typegen:
|
||||
cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen
|
||||
|
||||
frontend-lint:
|
||||
cd invokeai/frontend/web/src && \
|
||||
pnpm lint:tsc && \
|
||||
pnpm lint:dpdm && \
|
||||
pnpm lint:eslint --fix && \
|
||||
pnpm lint:prettier --write
|
||||
|
||||
# Tag the release
|
||||
wheel:
|
||||
cd scripts && ./build_wheel.sh
|
||||
# Installer zip file
|
||||
installer-zip:
|
||||
cd installer && ./create_installer.sh
|
||||
|
||||
# Tag the release
|
||||
tag-release:
|
||||
cd scripts && ./tag_release.sh
|
||||
cd installer && ./tag_release.sh
|
||||
|
||||
# Generate the OpenAPI Schema for the app
|
||||
openapi:
|
||||
@@ -91,4 +79,4 @@ openapi:
|
||||
# Serve the mkdocs site w/ live reload
|
||||
.PHONY: docs
|
||||
docs:
|
||||
mkdocs serve
|
||||
mkdocs serve
|
||||
115
README.md
@@ -4,33 +4,77 @@
|
||||
|
||||
# Invoke - Professional Creative AI Tools for Visual Media
|
||||
|
||||
#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
|
||||
|
||||
[![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||
|
||||
</div>
|
||||
|
||||
Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry leading web-based UI, and serves as the foundation for multiple commercial products.
|
||||
|
||||
- Free to use under a commercially-friendly license
|
||||
- Download and install on compatible hardware
|
||||
- Generate, refine, iterate on images, and build workflows
|
||||
Invoke is available in two editions:
|
||||
|
||||
| **Community Edition** | **Professional Edition** |
|
||||
|----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
|
||||
| **For users looking for a locally installed, self-hosted and self-managed service** | **For users or teams looking for a cloud-hosted, fully managed service** |
|
||||
| - Free to use under a commercially-friendly license | - Monthly subscription fee with three different plan levels |
|
||||
| - Download and install on compatible hardware | - Offers additional benefits, including multi-user support, improved model training, and more |
|
||||
| - Includes all core studio features: generate, refine, iterate on images, and build workflows | - Hosted in the cloud for easy, secure model access and scalability |
|
||||
| Quick Start -> [Installation and Updates][installation docs] | More Information -> [www.invoke.com/pricing](https://www.invoke.com/pricing) |
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
> ## 📣 Are you a new or returning InvokeAI user?
|
||||
> Take our first annual [User's Survey](https://forms.gle/rCE5KuQ7Wfrd1UnS7)
|
||||
|
||||
---
|
||||
|
||||
# Documentation
|
||||
| **Quick Links** |
|
||||
|----------------------------------------------------------------------------------------------------------------------------|
|
||||
| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |
|
||||
|
||||
| **Quick Links** |
|
||||
| ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |
|
||||
</div>
|
||||
|
||||
# Installation
|
||||
## Quick Start
|
||||
|
||||
To get started with Invoke, [Download the Launcher](https://github.com/invoke-ai/launcher/releases/latest).
|
||||
1. Download and unzip the installer from the bottom of the [latest release][latest release link].
|
||||
2. Run the installer script.
|
||||
|
||||
- **Windows**: Double-click on the `install.bat` script.
|
||||
- **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
|
||||
- **Linux**: Run `install.sh`.
|
||||
|
||||
3. When prompted, enter a location for the install and select your GPU type.
|
||||
4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
|
||||
5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
|
||||
6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
|
||||
7. Open the model manager tab to install a starter model and then you'll be ready to generate.
|
||||
|
||||
More detail, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs].
|
||||
|
||||
## Docker Container
|
||||
|
||||
We publish official container images in Github Container Registry: https://github.com/invoke-ai/InvokeAI/pkgs/container/invokeai. Both CUDA and ROCm images are available. Check the above link for relevant tags.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Ensure that Docker is set up to use the GPU. Refer to [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] documentation.
|
||||
|
||||
### Generate!
|
||||
|
||||
Run the container, modifying the command as necessary:
|
||||
|
||||
```bash
|
||||
docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
|
||||
```
|
||||
|
||||
Then open `http://localhost:9090` and install some models using the Model Manager tab to begin generating.
|
||||
|
||||
For ROCm, add `--device /dev/kfd --device /dev/dri` to the `docker run` command.
|
||||
|
||||
### Persist your data
|
||||
|
||||
You will likely want to persist your workspace outside of the container. Use the `--volume /home/myuser/invokeai:/invokeai` flag to mount some local directory (using its **absolute** path) to the `/invokeai` path inside the container. Your generated images and models will reside there. You can use this directory with other InvokeAI installations, or switch between runtime directories as needed.
|
||||
|
||||
### DIY
|
||||
|
||||
Build your own image and customize the environment to match your needs using our `docker-compose` stack. See [README.md](./docker/README.md) in the [docker](./docker) directory.
|
||||
|
||||
## Troubleshooting, FAQ and Support
|
||||
|
||||
@@ -52,45 +96,21 @@ The Unified Canvas is a fully integrated canvas implementation with support for
|
||||
|
||||
### Workflows & Nodes
|
||||
|
||||
Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the easy of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
|
||||
### Board & Gallery Management
|
||||
|
||||
Invoke features an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any Image-base UI element in the application, and rich metadata within the Image allows for easy recall of key prompts or settings used in your workflow.
|
||||
|
||||
### Model Support
|
||||
- SD 1.5
|
||||
- SD 2.0
|
||||
- SDXL
|
||||
- SD 3.5 Medium
|
||||
- SD 3.5 Large
|
||||
- CogView 4
|
||||
- Flux.1 Dev
|
||||
- Flux.1 Schnell
|
||||
- Flux.1 Kontext
|
||||
- Flux.1 Krea
|
||||
- Flux Redux
|
||||
- Flux Fill
|
||||
- Flux.2 Klein 4B
|
||||
- Flux.2 Klein 9B
|
||||
- Z-Image Turbo
|
||||
- Z-Image Base
|
||||
- Anima
|
||||
- Qwen Image
|
||||
- Qwen Image Edit
|
||||
- Nano Banana (API Only)
|
||||
- GPT Image (API Only)
|
||||
- Wan (API Only)
|
||||
|
||||
### Other features
|
||||
|
||||
- Support for ckpt, diffusers, and some gguf models
|
||||
- Support for both ckpt and diffusers models
|
||||
- SD1.5, SD2.0, SDXL, and FLUX support
|
||||
- Upscaling Tools
|
||||
- Embedding Manager & Support
|
||||
- Model Manager & Support
|
||||
- Workflow creation & management
|
||||
- Node-Based Architecture
|
||||
- Object Segmentation & Selection Models (SAM / SAM2)
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -106,14 +126,15 @@ Invoke is a combined effort of [passionate and talented people from across the w
|
||||
|
||||
Original portions of the software are Copyright © 2024 by respective contributors.
|
||||
|
||||
[features docs]: https://invoke.ai/
|
||||
[faq]: https://invoke.ai/troubleshooting/faq/
|
||||
[contributors]: https://invoke.ai/contributing/contributors/
|
||||
[features docs]: https://invoke-ai.github.io/InvokeAI/features/database/
|
||||
[faq]: https://invoke-ai.github.io/InvokeAI/faq/
|
||||
[contributors]: https://invoke-ai.github.io/InvokeAI/contributing/contributors/
|
||||
[invoke.com]: https://www.invoke.com/about
|
||||
[github issues]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[docs home]: https://invoke.ai
|
||||
[installation docs]: https://invoke.ai/start-here/installation/
|
||||
[docs home]: https://invoke-ai.github.io/InvokeAI
|
||||
[installation docs]: https://invoke-ai.github.io/InvokeAI/installation/
|
||||
[#dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
|
||||
[contributing docs]: https://invoke.ai/contributing/
|
||||
[contributing docs]: https://invoke-ai.github.io/InvokeAI/contributing/
|
||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
|
||||
14
SECURITY.md
@@ -1,14 +0,0 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Only the latest version of Invoke will receive security updates.
|
||||
We do not currently maintain multiple versions of the application with updates.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
To report a vulnerability, contact the Invoke team directly at security@invoke.ai
|
||||
|
||||
At this time, we do not maintain a formal bug bounty program.
|
||||
|
||||
You can also share identified security issues with our team on huntr.com
|
||||
@@ -1,169 +0,0 @@
|
||||
# User Isolation Implementation Summary
|
||||
|
||||
This document describes the implementation of user isolation features in the InvokeAI session queue and processing system to address issues identified in the enhancement request.
|
||||
|
||||
## Issues Addressed
|
||||
|
||||
### 1. Cross-User Image/Preview Visibility
|
||||
**Problem:** When two users are logged in simultaneously and one initiates a generation, the generation preview shows up in both users' browsers and the generated image gets saved to both users' image boards.
|
||||
|
||||
**Solution:** Implemented socket-level event filtering based on user authentication:
|
||||
|
||||
#### Backend Changes (`invokeai/app/api/sockets.py`):
|
||||
- Added socket authentication middleware in `_handle_connect()` method
|
||||
- Extracts JWT token from socket auth data or HTTP headers
|
||||
- Verifies token using existing `verify_token()` function
|
||||
- Stores `user_id` and `is_admin` in socket session for later use
|
||||
- Modified `_handle_queue_event()` to filter events by user:
|
||||
- For `QueueItemEventBase` events, only emit to:
|
||||
- The user who owns the queue item (`user_id` matches)
|
||||
- Admin users (`is_admin` is True)
|
||||
- For general queue events, emit to all subscribers
|
||||
|
||||
#### Event System Changes (`invokeai/app/services/events/events_common.py`):
|
||||
- Added `user_id` field to `QueueItemEventBase` class
|
||||
- Updated all event builders to include `user_id` from queue items:
|
||||
- `InvocationStartedEvent.build()`
|
||||
- `InvocationProgressEvent.build()`
|
||||
- `InvocationCompleteEvent.build()`
|
||||
- `InvocationErrorEvent.build()`
|
||||
- `QueueItemStatusChangedEvent.build()`
|
||||
|
||||
### 2. Batch Field Values Privacy
|
||||
**Problem:** Users can see batch field values from generation processes launched by other users.
|
||||
|
||||
**Solution:** Implemented field value sanitization at the API level:
|
||||
|
||||
#### API Router Changes (`invokeai/app/api/routers/session_queue.py`):
|
||||
- Created `sanitize_queue_item_for_user()` helper function
|
||||
- Clears `field_values` for non-admin users viewing other users' items
|
||||
- Admins and item owners can see all field values
|
||||
- Updated endpoints to require authentication and sanitize responses:
|
||||
- `list_all_queue_items()` - Added `CurrentUser` dependency
|
||||
- `get_queue_items_by_item_ids()` - Added `CurrentUser` dependency
|
||||
- `get_queue_item()` - Added `CurrentUser` dependency
|
||||
|
||||
### 3. Queue Updates Across Browser Windows
|
||||
**Problem:** When the job queue tab is open in multiple browsers and a generation is begun in one browser window, the queue does not update in the other window.
|
||||
|
||||
**Status:** This issue is likely resolved by the socket authentication and event filtering changes. The existing socket subscription mechanism (`subscribe_queue` event) already supports multiple connections per user. Testing is required to confirm this works correctly with the new authentication flow.
|
||||
|
||||
### 4. User Information Display
|
||||
**Problem:** Queue table lacks user identification, making it difficult to know who launched which job.
|
||||
|
||||
**Solution:** Added user information to queue items and UI:
|
||||
|
||||
#### Database Layer (`invokeai/app/services/session_queue/session_queue_sqlite.py`):
|
||||
- Updated SQL queries to JOIN with `users` table
|
||||
- Modified methods to fetch user information:
|
||||
- `get_queue_item()` - Now selects `display_name` and `email` from users table
|
||||
- `dequeue()` - Includes user info
|
||||
- `get_next()` - Includes user info
|
||||
- `get_current()` - Includes user info
|
||||
- `list_all_queue_items()` - Includes user info
|
||||
|
||||
#### Data Model Changes (`invokeai/app/services/session_queue/session_queue_common.py`):
|
||||
- Added optional fields to `SessionQueueItem`:
|
||||
- `user_display_name: Optional[str]` - Display name from users table
|
||||
- `user_email: Optional[str]` - Email from users table
|
||||
- Note: `user_id` field already existed from Migration 25
|
||||
|
||||
#### Frontend UI Changes:
|
||||
- **Constants** (`constants.ts`): Added `user: '8rem'` column width
|
||||
- **Header** (`QueueListHeader.tsx`): Added "User" column header
|
||||
- **Item Component** (`QueueItemComponent.tsx`):
|
||||
- Added logic to display user information (display_name → email → user_id)
|
||||
- Added user column to queue item row
|
||||
- Added tooltip with full username on hover
|
||||
- Added "Hidden for privacy" message when field_values are null for non-owned items
|
||||
- **Localization** (`en.json`): Added translations:
|
||||
- `"user": "User"`
|
||||
- `"fieldValuesHidden": "Hidden for privacy"`
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Token Verification
|
||||
- Tokens are verified using the existing `verify_token()` function from `invokeai.app.services.auth.token_service`
|
||||
- Invalid or missing tokens default to "system" user with non-admin privileges
|
||||
- Socket connections without valid tokens are still accepted for backward compatibility but have limited access
|
||||
|
||||
### Data Privacy
|
||||
- Field values are only visible to:
|
||||
- The user who created the queue item
|
||||
- Admin users
|
||||
- Non-admin users viewing other users' queue items see "Hidden for privacy" instead of field values
|
||||
|
||||
### Admin Privileges
|
||||
- Admin users can see all queue events and field values across all users
|
||||
- Admin status is determined from the JWT token's `is_admin` field
|
||||
|
||||
## Migration Notes
|
||||
|
||||
No database migration is required. The changes leverage:
|
||||
- Existing `user_id` column in `session_queue` table (added in Migration 25)
|
||||
- Existing `users` table (added in Migration 25)
|
||||
- SQL LEFT JOINs to fetch user information (gracefully handles missing user records)
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
### Backend Testing
|
||||
1. **Socket Authentication:**
|
||||
- Verify valid tokens are accepted and user context is stored
|
||||
- Verify invalid tokens default to system user
|
||||
- Verify expired tokens are rejected
|
||||
|
||||
2. **Event Filtering:**
|
||||
- User A should only receive events for their own queue items
|
||||
- Admin users should receive all events
|
||||
- Non-admin users should not receive events from other users
|
||||
|
||||
3. **Field Value Sanitization:**
|
||||
- Non-admin users should see null field_values for other users' items
|
||||
- Admins should see all field values
|
||||
- Users should see their own field values
|
||||
|
||||
### Frontend Testing
|
||||
1. **UI Display:**
|
||||
- User column should display in queue list
|
||||
- Display name should be shown when available
|
||||
- Email should be shown as fallback when display name is missing
|
||||
- User ID should be shown when both display name and email are missing
|
||||
- Tooltip should show full username on hover
|
||||
|
||||
2. **Field Values Display:**
|
||||
- "Hidden for privacy" message should appear when viewing other users' items
|
||||
- Own items should show field values normally
|
||||
|
||||
3. **Multi-Browser Testing:**
|
||||
- Open queue tab in two browsers with different users
|
||||
- Start generation in one browser
|
||||
- Verify other browser doesn't see the preview/progress
|
||||
- Verify admin user can see all generations
|
||||
|
||||
### Integration Testing
|
||||
1. Multi-user scenarios with simultaneous generations
|
||||
2. Queue updates across multiple browser windows
|
||||
3. Admin vs. non-admin privilege differentiation
|
||||
4. Socket reconnection handling
|
||||
|
||||
## Known Limitations
|
||||
|
||||
1. **TypeScript Types:**
|
||||
- The OpenAPI schema needs to be regenerated to include new fields
|
||||
- Run: `cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen`
|
||||
|
||||
2. **Backward Compatibility:**
|
||||
- System user ("system") entries will not have display name or email
|
||||
- Existing queue items from before Migration 25 will have user_id="system"
|
||||
|
||||
3. **Socket.IO Session Storage:**
|
||||
- Socket.IO's in-memory session storage may not persist across server restarts
|
||||
- Consider implementing persistent session storage if needed for production
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. Add user filtering to queue list (show only my items vs. all items)
|
||||
2. Add permission system for queue management operations (cancel, retry, delete)
|
||||
3. Implement queue item ownership transfer for administrative purposes
|
||||
4. Add audit logging for queue operations with user attribution
|
||||
5. Consider implementing user-specific queue limits or quotas
|
||||
@@ -22,10 +22,6 @@
|
||||
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
|
||||
# GPU_DRIVER=cuda #| rocm
|
||||
|
||||
## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
|
||||
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
|
||||
# RENDER_GROUP_ID=
|
||||
|
||||
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
|
||||
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
|
||||
# CONTAINER_UID=1000
|
||||
|
||||
@@ -1,11 +1,62 @@
|
||||
# syntax=docker/dockerfile:1.4
|
||||
|
||||
#### Web UI ------------------------------------
|
||||
## Builder stage
|
||||
|
||||
FROM docker.io/node:22-slim AS web-builder
|
||||
FROM library/ubuntu:23.04 AS builder
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt update && apt-get install -y \
|
||||
git \
|
||||
python3-venv \
|
||||
python3-pip \
|
||||
build-essential
|
||||
|
||||
ENV INVOKEAI_SRC=/opt/invokeai
|
||||
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
ARG GPU_DRIVER=cuda
|
||||
ARG TARGETPLATFORM="linux/amd64"
|
||||
# unused but available
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
COPY invokeai ./invokeai
|
||||
COPY pyproject.toml ./
|
||||
|
||||
# Editable mode helps use the same image for development:
|
||||
# the local working copy can be bind-mounted into the image
|
||||
# at path defined by ${INVOKEAI_SRC}
|
||||
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||
# x86_64/CUDA is default
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m venv ${VIRTUAL_ENV} &&\
|
||||
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
|
||||
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
|
||||
elif [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
|
||||
else \
|
||||
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
|
||||
fi &&\
|
||||
|
||||
# xformers + triton fails to install on arm64
|
||||
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||
pip install $extra_index_url_arg -e ".[xformers]"; \
|
||||
else \
|
||||
pip install $extra_index_url_arg -e "."; \
|
||||
fi
|
||||
|
||||
# #### Build the Web UI ------------------------------------
|
||||
|
||||
FROM node:20-slim AS web-builder
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
ENV PATH="$PNPM_HOME:$PATH"
|
||||
RUN corepack use pnpm@10.x && corepack enable
|
||||
RUN corepack use pnpm@8.x
|
||||
RUN corepack enable
|
||||
|
||||
WORKDIR /build
|
||||
COPY invokeai/frontend/web/ ./
|
||||
@@ -13,95 +64,61 @@ RUN --mount=type=cache,target=/pnpm/store \
|
||||
pnpm install --frozen-lockfile
|
||||
RUN npx vite build
|
||||
|
||||
## Backend ---------------------------------------
|
||||
#### Runtime stage ---------------------------------------
|
||||
|
||||
FROM library/ubuntu:24.04
|
||||
FROM library/ubuntu:23.04 AS runtime
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
--mount=type=cache,target=/var/lib/apt \
|
||||
apt update && apt install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
git \
|
||||
gosu \
|
||||
libglib2.0-0 \
|
||||
libgl1 \
|
||||
libglx-mesa0 \
|
||||
build-essential \
|
||||
libopencv-dev \
|
||||
libstdc++-10-dev
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
ENV \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
VIRTUAL_ENV=/opt/venv \
|
||||
INVOKEAI_SRC=/opt/invokeai \
|
||||
PYTHON_VERSION=3.12 \
|
||||
UV_PYTHON=3.12 \
|
||||
UV_COMPILE_BYTECODE=1 \
|
||||
UV_MANAGED_PYTHON=1 \
|
||||
UV_LINK_MODE=copy \
|
||||
UV_PROJECT_ENVIRONMENT=/opt/venv \
|
||||
INVOKEAI_ROOT=/invokeai \
|
||||
INVOKEAI_HOST=0.0.0.0 \
|
||||
INVOKEAI_PORT=9090 \
|
||||
PATH="/opt/venv/bin:$PATH" \
|
||||
CONTAINER_UID=${CONTAINER_UID:-1000} \
|
||||
CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
RUN apt update && apt install -y --no-install-recommends \
|
||||
git \
|
||||
curl \
|
||||
vim \
|
||||
tmux \
|
||||
ncdu \
|
||||
iotop \
|
||||
bzip2 \
|
||||
gosu \
|
||||
magic-wormhole \
|
||||
libglib2.0-0 \
|
||||
libgl1-mesa-glx \
|
||||
python3-venv \
|
||||
python3-pip \
|
||||
build-essential \
|
||||
libopencv-dev \
|
||||
libstdc++-10-dev &&\
|
||||
apt-get clean && apt-get autoclean
|
||||
|
||||
ARG GPU_DRIVER=cuda
|
||||
|
||||
# Install `uv` for package management
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
|
||||
ENV INVOKEAI_SRC=/opt/invokeai
|
||||
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||
ENV INVOKEAI_ROOT=/invokeai
|
||||
ENV INVOKEAI_HOST=0.0.0.0
|
||||
ENV INVOKEAI_PORT=9090
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
|
||||
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
|
||||
ENV CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
|
||||
# Install python & allow non-root user to use it by traversing the /root dir without read permissions
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv python install ${PYTHON_VERSION} && \
|
||||
# chmod --recursive a+rX /root/.local/share/uv/python
|
||||
chmod 711 /root
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
|
||||
# bind-mount instead of copy to defer adding sources to the image until next layer.
|
||||
#
|
||||
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||
# x86_64/CUDA is the default
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
|
||||
--mount=type=bind,source=invokeai/version,target=invokeai/version \
|
||||
ulimit -n 30000 && \
|
||||
uv sync --extra $GPU_DRIVER --frozen
|
||||
# --link requires buldkit w/ dockerfile syntax 1.4
|
||||
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
|
||||
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
|
||||
|
||||
# Link amdgpu.ids for ROCm builds
|
||||
# contributed by https://github.com/Rubonnek
|
||||
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
|
||||
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" && groupadd render
|
||||
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
# build patchmatch
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python -c "from patchmatch import patch_match"
|
||||
RUN python3 -c "from patchmatch import patch_match"
|
||||
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
|
||||
|
||||
COPY docker/docker-entrypoint.sh ./
|
||||
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||
CMD ["invokeai-web"]
|
||||
|
||||
# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
|
||||
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
|
||||
|
||||
# add sources last to minimize image changes on code changes
|
||||
COPY invokeai ${INVOKEAI_SRC}/invokeai
|
||||
|
||||
# this should not increase image size because we've already installed dependencies
|
||||
# in a previous layer
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
ulimit -n 30000 && \
|
||||
uv pip install -e .[$GPU_DRIVER]
|
||||
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
# syntax=docker/dockerfile:1.4
|
||||
|
||||
#### Web UI ------------------------------------
|
||||
|
||||
FROM docker.io/node:22-slim AS web-builder
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
ENV PATH="$PNPM_HOME:$PATH"
|
||||
RUN corepack use pnpm@8.x
|
||||
RUN corepack enable
|
||||
|
||||
WORKDIR /build
|
||||
COPY invokeai/frontend/web/ ./
|
||||
RUN --mount=type=cache,target=/pnpm/store \
|
||||
pnpm install --frozen-lockfile
|
||||
RUN npx vite build
|
||||
|
||||
## Backend ---------------------------------------
|
||||
|
||||
FROM library/ubuntu:24.04
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
--mount=type=cache,target=/var/lib/apt \
|
||||
apt update && apt install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
git \
|
||||
gosu \
|
||||
libglib2.0-0 \
|
||||
libgl1 \
|
||||
libglx-mesa0 \
|
||||
build-essential \
|
||||
libopencv-dev \
|
||||
libstdc++-10-dev \
|
||||
wget
|
||||
|
||||
ENV \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
VIRTUAL_ENV=/opt/venv \
|
||||
INVOKEAI_SRC=/opt/invokeai \
|
||||
PYTHON_VERSION=3.12 \
|
||||
UV_PYTHON=3.12 \
|
||||
UV_COMPILE_BYTECODE=1 \
|
||||
UV_MANAGED_PYTHON=1 \
|
||||
UV_LINK_MODE=copy \
|
||||
UV_PROJECT_ENVIRONMENT=/opt/venv \
|
||||
INVOKEAI_ROOT=/invokeai \
|
||||
INVOKEAI_HOST=0.0.0.0 \
|
||||
INVOKEAI_PORT=9090 \
|
||||
PATH="/opt/venv/bin:$PATH" \
|
||||
CONTAINER_UID=${CONTAINER_UID:-1000} \
|
||||
CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
|
||||
ARG GPU_DRIVER=cuda
|
||||
|
||||
# Install `uv` for package management
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
|
||||
|
||||
# Install python & allow non-root user to use it by traversing the /root dir without read permissions
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv python install ${PYTHON_VERSION} && \
|
||||
# chmod --recursive a+rX /root/.local/share/uv/python
|
||||
chmod 711 /root
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
|
||||
# bind-mount instead of copy to defer adding sources to the image until next layer.
|
||||
#
|
||||
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||
# x86_64/CUDA is the default
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
|
||||
--mount=type=bind,source=invokeai/version,target=invokeai/version \
|
||||
ulimit -n 30000 && \
|
||||
uv sync --extra $GPU_DRIVER --frozen
|
||||
|
||||
RUN --mount=type=cache,target=/var/cache/apt \
|
||||
--mount=type=cache,target=/var/lib/apt \
|
||||
if [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||
wget -O /tmp/amdgpu-install.deb \
|
||||
https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
|
||||
apt install -y /tmp/amdgpu-install.deb && \
|
||||
apt update && \
|
||||
amdgpu-install --usecase=rocm -y && \
|
||||
apt-get autoclean && \
|
||||
apt clean && \
|
||||
rm -rf /tmp/* /var/tmp/* && \
|
||||
usermod -a -G render ubuntu && \
|
||||
usermod -a -G video ubuntu && \
|
||||
echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
|
||||
ldconfig && \
|
||||
update-alternatives --auto rocm; \
|
||||
fi
|
||||
|
||||
## Heathen711: Leaving this for review input, will remove before merge
|
||||
# RUN --mount=type=cache,target=/var/cache/apt \
|
||||
# --mount=type=cache,target=/var/lib/apt \
|
||||
# if [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||
# groupadd render && \
|
||||
# usermod -a -G render ubuntu && \
|
||||
# usermod -a -G video ubuntu; \
|
||||
# fi
|
||||
|
||||
## Link amdgpu.ids for ROCm builds
|
||||
## contributed by https://github.com/Rubonnek
|
||||
# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
|
||||
# ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
|
||||
|
||||
# build patchmatch
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python -c "from patchmatch import patch_match"
|
||||
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
|
||||
|
||||
COPY docker/docker-entrypoint.sh ./
|
||||
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||
CMD ["invokeai-web"]
|
||||
|
||||
# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
|
||||
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
|
||||
|
||||
# add sources last to minimize image changes on code changes
|
||||
COPY invokeai ${INVOKEAI_SRC}/invokeai
|
||||
|
||||
# this should not increase image size because we've already installed dependencies
|
||||
# in a previous layer
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
|
||||
--mount=type=bind,source=uv.lock,target=uv.lock \
|
||||
ulimit -n 30000 && \
|
||||
uv pip install -e .[$GPU_DRIVER]
|
||||
|
||||
@@ -109,7 +109,7 @@ CONTAINER_UID=1000
|
||||
GPU_DRIVER=cuda
|
||||
```
|
||||
|
||||
Any environment variables supported by InvokeAI can be set here. See the [Configuration docs](https://invoke.ai/configuration/invokeai-yaml/) for further detail.
|
||||
Any environment variables supported by InvokeAI can be set here. See the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -47,9 +47,8 @@ services:
|
||||
|
||||
invokeai-rocm:
|
||||
<<: *invokeai
|
||||
environment:
|
||||
- AMD_VISIBLE_DEVICES=all
|
||||
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
|
||||
runtime: amd
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
profiles:
|
||||
- rocm
|
||||
|
||||
@@ -16,22 +16,8 @@ set -e -o pipefail
|
||||
|
||||
USER_ID=${CONTAINER_UID:-1000}
|
||||
USER=ubuntu
|
||||
# if the user does not exist, create it. It is expected to be present on ubuntu >=24.x
|
||||
_=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
|
||||
# ensure the UID is correct
|
||||
usermod -u ${USER_ID} ${USER} 1>/dev/null
|
||||
|
||||
## ROCM specific configuration
|
||||
# render group within the container must match the host render group
|
||||
# otherwise the container will not be able to access the host GPU.
|
||||
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
|
||||
# ensure the render group exists
|
||||
groupmod -g ${RENDER_GROUP_ID} render
|
||||
usermod -a -G render ${USER}
|
||||
usermod -a -G video ${USER}
|
||||
fi
|
||||
|
||||
|
||||
### Set the $PUBLIC_KEY env var to enable SSH access.
|
||||
# We do not install openssh-server in the image by default to avoid bloat.
|
||||
# but it is useful to have the full SSH server e.g. on Runpod.
|
||||
@@ -50,8 +36,6 @@ fi
|
||||
mkdir -p "${INVOKEAI_ROOT}"
|
||||
chown --recursive ${USER} "${INVOKEAI_ROOT}" || true
|
||||
cd "${INVOKEAI_ROOT}"
|
||||
export HF_HOME=${HF_HOME:-$INVOKEAI_ROOT/.cache/huggingface}
|
||||
export MPLCONFIGDIR=${MPLCONFIGDIR:-$INVOKEAI_ROOT/.matplotlib}
|
||||
|
||||
# Run the CMD as the Container User (not root).
|
||||
exec gosu ${USER} "$@"
|
||||
|
||||
@@ -13,7 +13,7 @@ run() {
|
||||
|
||||
# parse .env file for build args
|
||||
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
|
||||
profile="$(awk -F '=' '/GPU_DRIVER=/ {print $2}' .env)"
|
||||
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
|
||||
|
||||
# default to 'cuda' profile
|
||||
[[ -z "$profile" ]] && profile="cuda"
|
||||
@@ -30,7 +30,7 @@ run() {
|
||||
|
||||
printf "%s\n" "starting service $service_name"
|
||||
docker compose --profile "$profile" up -d "$service_name"
|
||||
docker compose --profile "$profile" logs -f
|
||||
docker compose logs -f
|
||||
}
|
||||
|
||||
run
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
# Release Process
|
||||
|
||||
The Invoke application is published as a python package on [PyPI]. This includes both a source distribution and built distribution (a wheel).
|
||||
|
||||
Most users install it with the [Launcher](https://github.com/invoke-ai/launcher/), others with `pip`.
|
||||
|
||||
The launcher uses GitHub as the source of truth for available releases.
|
||||
|
||||
## Broad Strokes
|
||||
|
||||
- Merge all changes and bump the version in the codebase.
|
||||
- Tag the release commit.
|
||||
- Wait for the release workflow to complete.
|
||||
- Approve the PyPI publish jobs.
|
||||
- Write GH release notes.
|
||||
|
||||
## General Prep
|
||||
|
||||
Make a developer call-out for PRs to merge. Merge and test things
|
||||
out. Create a branch with a name like user/chore/vX.X.X-prep and bump the version by editing
|
||||
`invokeai/version/invokeai_version.py` and commit locally.
|
||||
|
||||
## Release Workflow
|
||||
|
||||
The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.
|
||||
|
||||
It is triggered on **tag push**, when the tag matches `v*`.
|
||||
|
||||
### Triggering the Workflow
|
||||
|
||||
Ensure all commits that should be in the release are merged into this branch, and that you have pulled them locally.
|
||||
|
||||
Run `make tag-release` to tag the current commit and kick off the workflow. You will be prompted to provide a message - use the version specifier.
|
||||
|
||||
If this version's tag already exists for some reason (maybe you had to make a last minute change), the script will overwrite it.
|
||||
|
||||
Push the commit to trigger the workflow.
|
||||
|
||||
> In case you cannot use the Make target, the release may also be dispatched [manually] via GH.
|
||||
|
||||
### Workflow Jobs and Process
|
||||
|
||||
The workflow consists of a number of concurrently-run checks and tests, then two final publish jobs.
|
||||
|
||||
The publish jobs require manual approval and are only run if the other jobs succeed.
|
||||
|
||||
#### `check-version` Job
|
||||
|
||||
This job ensures that the `invokeai` python package version specifier matches the tag for the release. The version specifier is pulled from the `__version__` variable in `invokeai/version/invokeai_version.py`.
|
||||
|
||||
This job uses [samuelcolvin/check-python-version].
|
||||
|
||||
> Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc.
|
||||
|
||||
#### Check and Test Jobs
|
||||
|
||||
Next, these jobs run and must pass. They are the same jobs that are run for every PR.
|
||||
|
||||
- **`python-tests`**: runs `pytest` on matrix of platforms
|
||||
- **`python-checks`**: runs `ruff` (format and lint)
|
||||
- **`frontend-tests`**: runs `vitest`
|
||||
- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
|
||||
- **`typegen-checks`**: ensures the frontend and backend types are synced
|
||||
|
||||
#### `build-wheel` Job
|
||||
|
||||
This sets up both python and frontend dependencies and builds the python package. Internally, this runs `./scripts/build_wheel.sh` and uploads `dist.zip`, which contains the wheel and unarchived build.
|
||||
|
||||
You don't need to download or test these artifacts.
|
||||
|
||||
#### Sanity Check & Smoke Test
|
||||
|
||||
At this point, the release workflow pauses as the remaining publish jobs require approval.
|
||||
|
||||
It's possible to test the python package before it gets published to PyPI. We've never had problems with it, so it's not necessary to do this.
|
||||
|
||||
But, if you want to be extra-super careful, here's how to test it:
|
||||
|
||||
- Download the `dist.zip` build artifact from the `build-wheel` job
|
||||
- Unzip it and find the wheel file
|
||||
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/) - but instead of installing from PyPI, install from the wheel
|
||||
- Test the app
|
||||
|
||||
##### Something isn't right
|
||||
|
||||
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?) and start over.
|
||||
|
||||
#### PyPI Publish Jobs
|
||||
|
||||
The publish jobs will not run if any of the previous jobs fail.
|
||||
|
||||
They use [GitHub environments], which are configured as [trusted publishers] on PyPI.
|
||||
|
||||
Both jobs require a @lstein or @blessedcoolant to approve them from the workflow's **Summary** tab.
|
||||
|
||||
- Click the **Review deployments** button
|
||||
- Select the environment (either `testpypi` or `pypi` - typically you select both)
|
||||
- Click **Approve and deploy**
|
||||
|
||||
> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release.
|
||||
|
||||
##### Failing PyPI Publish
|
||||
|
||||
Check the [python infrastructure status page] for incidents.
|
||||
|
||||
If there are no incidents, contact @lstein or @blessedcoolant, who have owner access to GH and PyPI, to see if access has expired or something like that.
|
||||
|
||||
#### `publish-testpypi` Job
|
||||
|
||||
Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.
|
||||
|
||||
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release for some reason:
|
||||
|
||||
- Approve this publish job without approving the prod publish
|
||||
- Let it finish
|
||||
- Create a fresh Invoke install by following the [manual install guide](https://invoke-ai.github.io/InvokeAI/installation/manual/), making sure to use the Test PyPI index URL: `https://test.pypi.org/simple/`
|
||||
- Test the app
|
||||
|
||||
#### `publish-pypi` Job
|
||||
|
||||
Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.
|
||||
|
||||
It's a good idea to wait to approve and run this job until you have the release notes ready!
|
||||
|
||||
## Prep and publish the GitHub Release
|
||||
|
||||
1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
|
||||
2. The **Generate release notes** button automatically inserts the changelog and new contributors. Make sure to select the correct tags for this release and the last stable release. GH often selects the wrong tags - do this manually.
|
||||
3. Write the release notes, describing important changes. Contributions from community members should be shouted out. Use the GH-generated changelog to see all contributors. If there are Weblate translation updates, open that PR and shout out every person who contributed a translation.
|
||||
4. Check **Set as a pre-release** if it's a pre-release.
|
||||
5. Approve and wait for the `publish-pypi` job to finish if you haven't already.
|
||||
6. Publish the GH release.
|
||||
7. Post the release in Discord in the [releases](https://discord.com/channels/1020123559063990373/1149260708098359327) channel with abbreviated notes. For example:
|
||||
> Invoke v5.7.0 (stable): <https://github.com/invoke-ai/InvokeAI/releases/tag/v5.7.0>
|
||||
>
|
||||
> It's a pretty big one - Form Builder, Metadata Nodes (thanks @SkunkWorxDark!), and much more.
|
||||
8. Right click the message in releases and copy the link to it. Then, post that link in the [new-release-discussion](https://discord.com/channels/1020123559063990373/1149506274971631688) channel. For example:
|
||||
> Invoke v5.7.0 (stable): <https://discord.com/channels/1020123559063990373/1149260708098359327/1344521744916021248>
|
||||
|
||||
## Manual Release
|
||||
|
||||
The `release` workflow can be dispatched manually. You must dispatch the workflow from the right tag, else it will fail the version check.
|
||||
|
||||
This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.
|
||||
|
||||
[PyPI]: https://pypi.org/
|
||||
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
|
||||
[Test PyPI]: https://test.pypi.org/
|
||||
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
|
||||
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
|
||||
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
|
||||
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
|
||||
[manually]: #manual-release
|
||||
[python infrastructure status page]: https://status.python.org/
|
||||
|
Before Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 18 KiB |
|
Before Width: | Height: | Size: 40 KiB |
|
Before Width: | Height: | Size: 17 KiB |
@@ -1,205 +0,0 @@
|
||||
# Canvas Projects — Technical Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
Canvas Projects provide a save/load mechanism for the entire canvas state. The feature serializes all canvas entities, generation parameters, reference images, and their associated image files into a ZIP-based `.invk` file. On load, it restores the full state, handling image deduplication and re-uploading as needed.
|
||||
|
||||
## File Format
|
||||
|
||||
The `.invk` file is a standard ZIP archive with the following structure:
|
||||
|
||||
```
|
||||
project.invk
|
||||
├── manifest.json
|
||||
├── canvas_state.json
|
||||
├── params.json
|
||||
├── ref_images.json
|
||||
├── loras.json
|
||||
└── images/
|
||||
├── {image_name_1}.png
|
||||
├── {image_name_2}.png
|
||||
└── ...
|
||||
```
|
||||
|
||||
### manifest.json
|
||||
|
||||
Schema version and metadata. Validated on load with Zod.
|
||||
|
||||
```json
|
||||
{
|
||||
"version": 1,
|
||||
"appVersion": "5.12.0",
|
||||
"createdAt": "2026-02-26T12:00:00.000Z",
|
||||
"name": "My Canvas Project"
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|---|---|---|
|
||||
| `version` | `number` | Schema version, currently `1`. Used for migration logic on load. |
|
||||
| `appVersion` | `string` | InvokeAI version that created the file. Informational only. |
|
||||
| `createdAt` | `string` | ISO 8601 timestamp. |
|
||||
| `name` | `string` | User-provided project name. Also used as the download filename. |
|
||||
|
||||
### canvas_state.json
|
||||
|
||||
The serialized canvas entity tree. Type: `CanvasProjectState`.
|
||||
|
||||
```typescript
|
||||
type CanvasProjectState = {
|
||||
rasterLayers: CanvasRasterLayerState[];
|
||||
controlLayers: CanvasControlLayerState[];
|
||||
inpaintMasks: CanvasInpaintMaskState[];
|
||||
regionalGuidance: CanvasRegionalGuidanceState[];
|
||||
bbox: CanvasState['bbox'];
|
||||
selectedEntityIdentifier: CanvasState['selectedEntityIdentifier'];
|
||||
bookmarkedEntityIdentifier: CanvasState['bookmarkedEntityIdentifier'];
|
||||
};
|
||||
```
|
||||
|
||||
Each entity contains its full state including all canvas objects (brush lines, eraser lines, rect shapes, images). Image objects reference files by `image_name` which correspond to files in the `images/` folder.
|
||||
|
||||
### params.json
|
||||
|
||||
The complete generation parameters state (`ParamsState`). Optional on load (older files may not have it). This includes all fields from the params Redux slice:
|
||||
|
||||
- Prompts (positive, negative, prompt history)
|
||||
- Core generation settings (seed, steps, CFG scale, guidance, scheduler, iterations)
|
||||
- Model selections (main model, VAE, FLUX VAE, T5 encoder, CLIP embed models, refiner, Z-Image models, Klein models)
|
||||
- Dimensions (width, height, aspect ratio)
|
||||
- Img2img strength
|
||||
- Infill settings (method, tile size, patchmatch downscale, color)
|
||||
- Canvas coherence settings (mode, edge size, min denoise)
|
||||
- Refiner parameters (steps, CFG scale, scheduler, aesthetic scores, start)
|
||||
- FLUX-specific settings (scheduler, DyPE preset/scale/exponent)
|
||||
- Z-Image-specific settings (scheduler, seed variance)
|
||||
- Upscale settings (scheduler, CFG scale)
|
||||
- Seamless tiling, mask blur, CLIP skip, VAE precision, CPU noise, color compensation
|
||||
|
||||
### ref_images.json
|
||||
|
||||
Global reference image entities (`RefImageState[]`). These are IP-Adapter / FLUX Redux configs with `CroppableImageWithDims` containing both original and cropped image references. Optional on load.
|
||||
|
||||
### loras.json
|
||||
|
||||
Array of LoRA configurations (`LoRA[]`). Each entry contains:
|
||||
|
||||
```typescript
|
||||
type LoRA = {
|
||||
id: string;
|
||||
isEnabled: boolean;
|
||||
model: ModelIdentifierField;
|
||||
weight: number;
|
||||
};
|
||||
```
|
||||
|
||||
Optional on load. Like models, LoRA identifiers are stored as-is — if a LoRA is not installed when loading, the entry is restored but may not be usable.
|
||||
|
||||
### images/
|
||||
|
||||
All image files referenced anywhere in the state. Keyed by their original `image_name`. On save, each image is fetched from the backend via `GET /api/v1/images/i/{name}/full` and stored as-is.
|
||||
|
||||
## Key Source Files
|
||||
|
||||
| File | Purpose |
|
||||
|---|---|
|
||||
| `features/controlLayers/util/canvasProjectFile.ts` | Types, constants, image name collection, remapping, existence checking |
|
||||
| `features/controlLayers/hooks/useCanvasProjectSave.ts` | Save hook — collects Redux state, fetches images, builds ZIP |
|
||||
| `features/controlLayers/hooks/useCanvasProjectLoad.ts` | Load hook — parses ZIP, deduplicates images, dispatches state |
|
||||
| `features/controlLayers/components/SaveCanvasProjectDialog.tsx` | Save name dialog + `useSaveCanvasProjectWithDialog` hook |
|
||||
| `features/controlLayers/components/LoadCanvasProjectConfirmationAlertDialog.tsx` | Load confirmation dialog + `useLoadCanvasProjectWithDialog` hook |
|
||||
| `features/controlLayers/components/Toolbar/CanvasToolbarProjectMenuButton.tsx` | Toolbar dropdown UI |
|
||||
| `features/controlLayers/store/canvasSlice.ts` | `canvasProjectRecalled` Redux action |
|
||||
|
||||
## Save Flow
|
||||
|
||||
1. User clicks "Save Canvas Project" → `SaveCanvasProjectDialog` opens asking for a project name
|
||||
2. On confirm, `saveCanvasProject(name)` is called
|
||||
3. Read Redux state via selectors: `selectCanvasSlice()`, `selectParamsSlice()`, `selectRefImagesSlice()`, `selectLoRAsSlice()`
|
||||
4. Build `CanvasProjectState` from the canvas slice; use `paramsState` directly for params
|
||||
5. Walk all entities to collect every `image_name` reference via `collectImageNames()`:
|
||||
- `CanvasImageState.image.image_name` in layer/mask objects
|
||||
- `CroppableImageWithDims.original.image.image_name` in global ref images
|
||||
- `CroppableImageWithDims.crop.image.image_name` in cropped ref images
|
||||
- `ImageWithDims.image_name` in regional guidance ref images
|
||||
6. Fetch each image from the backend API
|
||||
7. Build ZIP with JSZip: add `manifest.json` (including `name`), `canvas_state.json`, `params.json`, `ref_images.json`, and all images into `images/`
|
||||
8. Sanitize the name for filesystem use and generate blob, trigger download as `{name}.invk`
|
||||
|
||||
## Load Flow
|
||||
|
||||
1. User selects `.invk` file → confirmation dialog opens
|
||||
2. On confirm, parse ZIP with JSZip
|
||||
3. Validate manifest version via Zod schema
|
||||
4. Read `canvas_state.json`, `params.json` (optional), `ref_images.json` (optional)
|
||||
5. Collect all `image_name` references from the loaded state
|
||||
6. **Deduplicate images**: for each referenced image, check if it exists on the server via `getImageDTOSafe(image_name)`
|
||||
- Already exists → skip (no upload)
|
||||
- Missing → upload from ZIP via `uploadImage()`, record `oldName → newName` mapping
|
||||
7. Remap all `image_name` values in the loaded state using the mapping (only for re-uploaded images whose names changed)
|
||||
8. Dispatch Redux actions:
|
||||
- `canvasProjectRecalled()` — restores all canvas entities, bbox, selected/bookmarked entity
|
||||
- `refImagesRecalled()` — restores global reference images
|
||||
- `paramsRecalled()` — replaces the entire params state in one action
|
||||
- `loraAllDeleted()` + `loraRecalled()` — restores LoRAs
|
||||
9. Show success/error toast
|
||||
|
||||
## Image Name Collection & Remapping
|
||||
|
||||
The `canvasProjectFile.ts` utility provides two parallel sets of functions:
|
||||
|
||||
**Collection** (`collectImageNames`): Walks the entire state tree and returns a `Set<string>` of all referenced `image_name` values. This is used by both save (to know which images to fetch) and load (to know which images to check/upload).
|
||||
|
||||
**Remapping** (`remapCanvasState`, `remapRefImages`): Deep-clones state objects and replaces `image_name` values using a `Map<string, string>` mapping. Only images that were re-uploaded with a different name are remapped. Images that already existed on the server are left unchanged.
|
||||
|
||||
Both walk the same paths through the state tree:
|
||||
- Layer/mask objects → `CanvasImageState.image.image_name`
|
||||
- Regional guidance ref images → `ImageWithDims.image_name`
|
||||
- Global ref images → `CroppableImageWithDims.original.image.image_name` and `.crop.image.image_name`
|
||||
|
||||
## Extending the Format
|
||||
|
||||
### Adding new optional data (non-breaking)
|
||||
|
||||
Add a new JSON file to the ZIP. No version bump needed.
|
||||
|
||||
1. **Save**: Add `zip.file('new_data.json', JSON.stringify(data))` in `useCanvasProjectSave.ts`
|
||||
2. **Load**: Read with `zip.file('new_data.json')` in `useCanvasProjectLoad.ts` — check for `null` so older project files without it still load
|
||||
3. **Dispatch**: Add the appropriate Redux action to restore the data
|
||||
|
||||
### Adding new entity types with images
|
||||
|
||||
1. Extend `CanvasProjectState` type in `canvasProjectFile.ts`
|
||||
2. Add collection logic in `collectImageNames()` to walk the new entity's objects
|
||||
3. Add remapping logic in `remapCanvasState()` to update image names
|
||||
4. Include the new entity array in both save and load hooks
|
||||
5. Handle it in the `canvasProjectRecalled` reducer in `canvasSlice.ts`
|
||||
|
||||
### Breaking schema changes
|
||||
|
||||
1. Bump `CANVAS_PROJECT_VERSION` in `canvasProjectFile.ts`
|
||||
2. Update the Zod manifest schema: `version: z.union([z.literal(1), z.literal(2)])`
|
||||
3. Add migration logic in the load hook: check version, transform v1 → v2 before dispatching
|
||||
|
||||
## UI Architecture
|
||||
|
||||
### Save dialog
|
||||
|
||||
The save flow uses a **nanostore atom** (`$isOpen`) to control the `SaveCanvasProjectDialog`:
|
||||
|
||||
1. `useSaveCanvasProjectWithDialog()` — returns a callback that sets `$isOpen` to `true`
|
||||
2. `SaveCanvasProjectDialog` (singleton in `GlobalModalIsolator`) — renders an `AlertDialog` with a name input
|
||||
3. On save → calls `saveCanvasProject(name)` and closes the dialog
|
||||
4. On cancel → closes the dialog
|
||||
|
||||
### Load dialog
|
||||
|
||||
The load flow uses a **nanostore atom** (`$pendingFile`) to decouple the file dialog from the confirmation dialog:
|
||||
|
||||
1. `useLoadCanvasProjectWithDialog()` — opens a programmatic file input (`document.createElement('input')`)
|
||||
2. On file selection → sets `$pendingFile` atom
|
||||
3. `LoadCanvasProjectConfirmationAlertDialog` (singleton in `GlobalModalIsolator`) — subscribes to `$pendingFile` via `useStore()`
|
||||
4. On accept → calls `loadCanvasProject(file)` and clears the atom
|
||||
5. On cancel → clears the atom
|
||||
|
||||
The programmatic file input approach was chosen because the context menu component uses `isLazy: true`, which unmounts the DOM tree when the menu closes — a hidden `<input>` element inside the menu would be destroyed before the file dialog returns.
|
||||
@@ -1,295 +0,0 @@
|
||||
# Hotkeys System
|
||||
|
||||
This document describes the technical implementation of the customizable hotkeys system in InvokeAI.
|
||||
|
||||
> **Note:** For user-facing documentation on how to use customizable hotkeys, see [Hotkeys Feature Documentation](../features/hotkeys.md).
|
||||
|
||||
## Overview
|
||||
|
||||
The hotkeys system allows users to customize keyboard shortcuts throughout the application. All hotkeys are:
|
||||
- Centrally defined and managed
|
||||
- Customizable by users
|
||||
- Persisted across sessions
|
||||
- Type-safe and validated
|
||||
|
||||
## Architecture
|
||||
|
||||
The customizable hotkeys feature is built on top of the existing hotkey system with the following components:
|
||||
|
||||
### 1. Hotkeys State Slice (`hotkeysSlice.ts`)
|
||||
|
||||
Location: `invokeai/frontend/web/src/features/system/store/hotkeysSlice.ts`
|
||||
|
||||
**Responsibilities:**
|
||||
- Stores custom hotkey mappings in Redux state
|
||||
- Persisted to IndexedDB using `redux-remember`
|
||||
- Provides actions to change, reset individual, or reset all hotkeys
|
||||
|
||||
**State Shape:**
|
||||
```typescript
|
||||
{
|
||||
_version: 1,
|
||||
customHotkeys: {
|
||||
'app.invoke': ['mod+enter'],
|
||||
'canvas.undo': ['mod+z'],
|
||||
// ...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Actions:**
|
||||
- `hotkeyChanged(id, hotkeys)` - Update a single hotkey
|
||||
- `hotkeyReset(id)` - Reset a single hotkey to default
|
||||
- `allHotkeysReset()` - Reset all hotkeys to defaults
|
||||
|
||||
### 2. useHotkeyData Hook (`useHotkeyData.ts`)
|
||||
|
||||
Location: `invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts`
|
||||
|
||||
**Responsibilities:**
|
||||
- Defines all default hotkeys
|
||||
- Merges default hotkeys with custom hotkeys from the store
|
||||
- Returns the effective hotkeys that should be used throughout the app
|
||||
- Provides platform-specific key translations (Ctrl/Cmd, Alt/Option)
|
||||
|
||||
**Key Functions:**
|
||||
- `useHotkeyData()` - Returns all hotkeys organized by category
|
||||
- `useRegisteredHotkeys()` - Hook to register a hotkey in a component
|
||||
|
||||
### 3. HotkeyEditor Component (`HotkeyEditor.tsx`)
|
||||
|
||||
Location: `invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeyEditor.tsx`
|
||||
|
||||
**Features:**
|
||||
- Inline editor with input field
|
||||
- Modifier buttons (Mod, Ctrl, Shift, Alt) for quick insertion
|
||||
- Live preview of hotkey combinations
|
||||
- Validation with visual feedback
|
||||
- Help tooltip with syntax examples
|
||||
- Save/cancel/reset buttons
|
||||
|
||||
**Smart Features:**
|
||||
- Automatic `+` insertion between modifiers
|
||||
- Cursor position preservation
|
||||
- Validation prevents invalid combinations (e.g., modifier-only keys)
|
||||
|
||||
### 4. HotkeysModal Component (`HotkeysModal.tsx`)
|
||||
|
||||
Location: `invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModal.tsx`
|
||||
|
||||
**Features:**
|
||||
- View Mode / Edit Mode toggle
|
||||
- Search functionality
|
||||
- Category-based organization
|
||||
- Shows HotkeyEditor components when in edit mode
|
||||
- "Reset All to Default" button in edit mode
|
||||
|
||||
## Data Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ 1. User opens Hotkeys Modal │
|
||||
│ 2. User clicks "Edit Mode" button │
|
||||
│ 3. User clicks edit icon next to a hotkey │
|
||||
│ 4. User enters new hotkey(s) using editor │
|
||||
│ 5. User clicks save or presses Enter │
|
||||
│ 6. Custom hotkey stored via hotkeyChanged() action │
|
||||
│ 7. Redux state persisted to IndexedDB (redux-remember) │
|
||||
│ 8. useHotkeyData() hook picks up the change │
|
||||
│ 9. All components using useRegisteredHotkeys() get update │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Hotkey Format
|
||||
|
||||
Hotkeys use the format from `react-hotkeys-hook` library:
|
||||
|
||||
- **Modifiers:** `mod`, `ctrl`, `shift`, `alt`, `meta`
|
||||
- **Keys:** Letters, numbers, function keys, special keys
|
||||
- **Separator:** `+` between keys in a combination
|
||||
- **Multiple hotkeys:** Comma-separated (e.g., `mod+a, ctrl+b`)
|
||||
|
||||
**Examples:**
|
||||
- `mod+enter` - Mod key + Enter
|
||||
- `shift+x` - Shift + X
|
||||
- `ctrl+shift+a` - Control + Shift + A
|
||||
- `f1, f2` - F1 or F2 (alternatives)
|
||||
|
||||
## Developer Guide
|
||||
|
||||
### Using Hotkeys in Components
|
||||
|
||||
To use a hotkey in a component:
|
||||
|
||||
```tsx
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
|
||||
const MyComponent = () => {
|
||||
const handleAction = useCallback(() => {
|
||||
// Your action here
|
||||
}, []);
|
||||
|
||||
// This automatically uses custom hotkeys if configured
|
||||
useRegisteredHotkeys({
|
||||
id: 'myAction',
|
||||
category: 'app', // or 'canvas', 'viewer', 'gallery', 'workflows'
|
||||
callback: handleAction,
|
||||
options: { enabled: true, preventDefault: true },
|
||||
dependencies: [handleAction]
|
||||
});
|
||||
|
||||
// ...
|
||||
};
|
||||
```
|
||||
|
||||
**Options:**
|
||||
- `enabled` - Whether the hotkey is active
|
||||
- `preventDefault` - Prevent default browser behavior
|
||||
- `enableOnFormTags` - Allow hotkey in form elements (default: false)
|
||||
|
||||
### Adding New Hotkeys
|
||||
|
||||
To add a new hotkey to the system:
|
||||
|
||||
#### 1. Add Translation Strings
|
||||
|
||||
In `invokeai/frontend/web/public/locales/en.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"hotkeys": {
|
||||
"app": {
|
||||
"myAction": {
|
||||
"title": "My Action",
|
||||
"desc": "Description of what this hotkey does"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 2. Register the Hotkey
|
||||
|
||||
In `invokeai/frontend/web/src/features/system/components/HotkeysModal/useHotkeyData.ts`:
|
||||
|
||||
```typescript
|
||||
// Inside the appropriate category builder function
|
||||
addHotkey('app', 'myAction', ['mod+k']); // Default binding
|
||||
```
|
||||
|
||||
#### 3. Use the Hotkey
|
||||
|
||||
In your component:
|
||||
|
||||
```typescript
|
||||
useRegisteredHotkeys({
|
||||
id: 'myAction',
|
||||
category: 'app',
|
||||
callback: handleMyAction,
|
||||
options: { enabled: true },
|
||||
dependencies: [handleMyAction]
|
||||
});
|
||||
```
|
||||
|
||||
### Hotkey Categories
|
||||
|
||||
Current categories:
|
||||
- **app** - Global application hotkeys
|
||||
- **canvas** - Canvas/drawing operations
|
||||
- **viewer** - Image viewer operations
|
||||
- **gallery** - Gallery/image grid operations
|
||||
- **workflows** - Node workflow editor
|
||||
|
||||
To add a new category, update `useHotkeyData.ts` and add translations.
|
||||
|
||||
## Testing
|
||||
|
||||
Tests are located in `invokeai/frontend/web/src/features/system/store/hotkeysSlice.test.ts`.
|
||||
|
||||
**Test Coverage:**
|
||||
- Adding custom hotkeys
|
||||
- Updating existing custom hotkeys
|
||||
- Resetting individual hotkeys
|
||||
- Resetting all hotkeys
|
||||
- State persistence and migration
|
||||
|
||||
Run tests with:
|
||||
|
||||
```bash
|
||||
cd invokeai/frontend/web
|
||||
pnpm test:no-watch
|
||||
```
|
||||
|
||||
## Persistence
|
||||
|
||||
Custom hotkeys are persisted using the same mechanism as other app settings:
|
||||
|
||||
- Stored in Redux state under the `hotkeys` slice
|
||||
- Persisted to IndexedDB via `redux-remember`
|
||||
- Automatically loaded when the app starts
|
||||
- Survives page refreshes and browser restarts
|
||||
- Includes migration support for state schema changes
|
||||
|
||||
**State Location:**
|
||||
- IndexedDB database: `invoke`
|
||||
- Store key: `hotkeys`
|
||||
|
||||
## Dependencies
|
||||
|
||||
- **react-hotkeys-hook** (v4.5.0) - Core hotkey handling
|
||||
- **@reduxjs/toolkit** - State management
|
||||
- **redux-remember** - Persistence
|
||||
- **zod** - State validation
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use `mod` instead of `ctrl`** - Automatically maps to Cmd on Mac, Ctrl elsewhere
|
||||
2. **Provide descriptive translations** - Help users understand what each hotkey does
|
||||
3. **Avoid conflicts** - Check existing hotkeys before adding new ones
|
||||
4. **Use preventDefault** - Prevent browser default behavior when appropriate
|
||||
5. **Check enabled state** - Only activate hotkeys when the action is available
|
||||
6. **Use dependencies correctly** - Ensure callbacks are stable with useCallback
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Conditional Hotkeys
|
||||
|
||||
```typescript
|
||||
useRegisteredHotkeys({
|
||||
id: 'save',
|
||||
category: 'app',
|
||||
callback: handleSave,
|
||||
options: {
|
||||
enabled: hasUnsavedChanges && !isLoading, // Only when valid
|
||||
preventDefault: true
|
||||
},
|
||||
dependencies: [hasUnsavedChanges, isLoading, handleSave]
|
||||
});
|
||||
```
|
||||
|
||||
### Multiple Hotkeys for Same Action
|
||||
|
||||
```typescript
|
||||
// In useHotkeyData.ts
|
||||
addHotkey('canvas', 'redo', ['mod+shift+z', 'mod+y']); // Two alternatives
|
||||
```
|
||||
|
||||
### Focus-Scoped Hotkeys
|
||||
|
||||
```typescript
|
||||
import { useFocusRegion } from 'common/hooks/focus';
|
||||
|
||||
const MyComponent = () => {
|
||||
const focusRegionRef = useFocusRegion('myRegion');
|
||||
|
||||
// Hotkey only works when this region has focus
|
||||
useRegisteredHotkeys({
|
||||
id: 'myAction',
|
||||
category: 'app',
|
||||
callback: handleAction,
|
||||
options: { enabled: true }
|
||||
});
|
||||
|
||||
return <div ref={focusRegionRef}>...</div>;
|
||||
};
|
||||
```
|
||||
@@ -1,64 +0,0 @@
|
||||
# Pull Request Merge Policy
|
||||
|
||||
This document outlines the process for reviewing and merging pull requests (PRs) into the InvokeAI repository.
|
||||
|
||||
## Review Process
|
||||
|
||||
### 1. Assignment
|
||||
|
||||
One of the repository maintainers will assign collaborators to review a pull request. The assigned reviewer(s) will be responsible for conducting the code review.
|
||||
|
||||
### 2. Review and Iteration
|
||||
|
||||
The assignee is responsible for:
|
||||
- Reviewing the PR thoroughly
|
||||
- Providing constructive feedback
|
||||
- Iterating with the PR author until the assignee is satisfied that the PR is fit to merge
|
||||
- Ensuring the PR meets code quality standards, follows project conventions, and doesn't introduce bugs or regressions
|
||||
|
||||
### 3. Approval and Notification
|
||||
|
||||
Once the assignee is satisfied with the PR:
|
||||
- The assignee approves the PR
|
||||
- The assignee alerts one of the maintainers that the PR is ready for merge using the **#request-reviews Discord channel**
|
||||
|
||||
### 4. Final Merge
|
||||
|
||||
One of the maintainers is responsible for:
|
||||
- Performing a final check of the PR
|
||||
- Merging the PR into the appropriate branch
|
||||
|
||||
**Important:** Collaborators are strongly discouraged from merging PRs on their own, except in case of emergency (e.g., critical bug fix and no maintainer is available).
|
||||
|
||||
### 5. Release Policy
|
||||
|
||||
Once a feature release candidate is published, no feature PRs are to
|
||||
be merged into main. Only bugfixes are allowed until the final
|
||||
release.
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Clean Commit History
|
||||
|
||||
To encourage a clean development log, PR authors are encouraged to use `git rebase -i` to suppress trivial commit messages (e.g., `ruff` and `prettier` formatting fixes) after the PR is accepted but before it is merged.
|
||||
|
||||
### Merge Strategy
|
||||
|
||||
The maintainer will perform either a **3-way merge** or **squash merge** when merging a PR into the `main` branch. This approach helps avoid rebase conflict hell and maintains a cleaner project history.
|
||||
|
||||
### Attribution
|
||||
|
||||
The PR author should reference any papers, source code or
|
||||
documentation that they used while creating the code both in the PR
|
||||
and as comments in the code itself. If there are any licensing
|
||||
restrictions, these should be linked to and/or reproduced in the repo
|
||||
root.
|
||||
|
||||
|
||||
## Summary
|
||||
|
||||
This policy ensures that:
|
||||
- All PRs receive proper review from assigned collaborators
|
||||
- Maintainers have final oversight before code enters the main branch
|
||||
- The commit history remains clean and meaningful
|
||||
- Merge conflicts are minimized through appropriate merge strategies
|
||||
@@ -1,92 +0,0 @@
|
||||
# Dev Environment
|
||||
|
||||
To make changes to Invoke's backend, frontend or documentation, you'll need to set up a dev environment.
|
||||
|
||||
If you only want to make changes to the docs site, you can skip the frontend dev environment setup as described in the below guide.
|
||||
|
||||
If you just want to use Invoke, you should use the [launcher][launcher link].
|
||||
|
||||
!!! warning
|
||||
|
||||
Invoke uses a SQLite database. When you run the application as a dev install, you accept responsibility for your database. This means making regular backups (especially before pulling) and/or fixing it yourself in the event that a PR introduces a schema change.
|
||||
|
||||
If you don't need to persist your db, you can use an ephemeral in-memory database by setting `use_memory_db: true` in your `invokeai.yaml` file. You'll also want to set `scan_models_on_startup: true` so that your models are registered on startup.
|
||||
|
||||
## Setup
|
||||
|
||||
1. Run through the [requirements][requirements link].
|
||||
|
||||
2. [Fork and clone][forking link] the [InvokeAI repo][repo link].
|
||||
|
||||
3. This repository uses Git LFS to manage large files. To ensure all assets are downloaded:
|
||||
- Install git-lfs → [Download here](https://git-lfs.com/)
|
||||
- Enable automatic LFS fetching for this repository:
|
||||
```shell
|
||||
git config lfs.fetchinclude "*"
|
||||
```
|
||||
- Fetch files from LFS (only needs to be done once; subsequent `git pull` will fetch changes automatically):
|
||||
```
|
||||
git lfs pull
|
||||
```
|
||||
4. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
|
||||
|
||||
5. Follow the [manual install][manual install link] guide, with some modifications to the install command:
|
||||
|
||||
- Use `.` instead of `invokeai` to install from the current directory. You don't need to specify the version.
|
||||
|
||||
- Add `-e` after the `install` operation to make this an [editable install][editable install link]. That means your changes to the python code will be reflected when you restart the Invoke server.
|
||||
|
||||
- When installing the `invokeai` package, add the `dev`, `test` and `docs` package options to the package specifier. You may or may not need the `xformers` option - follow the manual install guide to figure that out. So, your package specifier will be either `".[dev,test,docs]"` or `".[dev,test,docs,xformers]"`. Note the quotes!
|
||||
|
||||
With the modifications made, the install command should look something like this:
|
||||
|
||||
```sh
|
||||
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
|
||||
```
|
||||
|
||||
6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
|
||||
|
||||
This is because the UI build is not distributed with the source code. You need to build it manually. End the running server instance.
|
||||
|
||||
If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.
|
||||
|
||||
7. Install the frontend dev toolchain, paying attention to versions:
|
||||
|
||||
- [`nodejs`](https://nodejs.org/) (tested on LTS, v22)
|
||||
|
||||
- [`pnpm`](https://pnpm.io/installation) (tested on v10)
|
||||
|
||||
8. Do a production build of the frontend:
|
||||
|
||||
```sh
|
||||
cd <PATH_TO_INVOKEAI_REPO>/invokeai/frontend/web
|
||||
pnpm i
|
||||
pnpm build
|
||||
```
|
||||
|
||||
9. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes.
|
||||
|
||||
## Updating the UI
|
||||
|
||||
You'll need to run `pnpm build` every time you pull in new changes.
|
||||
|
||||
Another option is to skip the build and instead run the UI in dev mode:
|
||||
|
||||
```sh
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
This starts a vite dev server for the UI at `127.0.0.1:5173`, which you will use instead of `127.0.0.1:9090`.
|
||||
|
||||
The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out. It will hot-reload the UI as you make changes to the frontend code. Sometimes the hot-reload doesn't work, and you need to manually refresh the browser tab.
|
||||
|
||||
## Documentation
|
||||
|
||||
The documentation is built with `mkdocs`. It provides a hot-reload dev server for the docs. Start it with `mkdocs serve`.
|
||||
|
||||
[launcher link]: ../installation/quick_start.md
|
||||
[forking link]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
|
||||
[requirements link]: ../installation/requirements.md
|
||||
[repo link]: https://github.com/invoke-ai/InvokeAI
|
||||
[manual install link]: ../installation/manual.md
|
||||
[editable install link]: https://pip.pypa.io/en/latest/cli/pip_install/#cmdoption-e
|
||||
@@ -1,56 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
Invoke originated as a project built by the community, and that vision carries forward today as we aim to build the best pro-grade tools available. We work together to incorporate the latest in AI/ML research, making these tools available in over 20 languages to artists and creatives around the world as part of our fully permissive OSS project designed for individual users to self-host and use.
|
||||
|
||||
We welcome contributions, whether features, bug fixes, code cleanup, testing, code reviews, documentation or translation. Please check in with us before diving in to code to ensure your work aligns with our vision.
|
||||
|
||||
## Development
|
||||
|
||||
If you’d like to help with development, please see our [development guide](contribution_guides/development.md).
|
||||
|
||||
## External Providers
|
||||
|
||||
If you are adding external image generation providers or configs, see our [external provider integration guide](EXTERNAL_PROVIDERS.md).
|
||||
|
||||
**New Contributors:** If you’re unfamiliar with contributing to open source projects, take a look at our [new contributor guide](contribution_guides/newContributorChecklist.md).
|
||||
|
||||
## Nodes
|
||||
|
||||
If you’d like to add a Node, please see our [nodes contribution guide](../nodes/contributingNodes.md).
|
||||
|
||||
## Support and Triaging
|
||||
|
||||
Helping support other users in [Discord](https://discord.gg/ZmtBAhwWhy) and on Github are valuable forms of contribution that we greatly appreciate.
|
||||
|
||||
We receive many issues and requests for help from users. We're limited in bandwidth relative to our user base, so providing answers to questions or helping identify causes of issues is very helpful. By doing this, you enable us to spend time on the highest priority work.
|
||||
|
||||
## Documentation
|
||||
|
||||
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documentation.md).
|
||||
|
||||
## Translation
|
||||
|
||||
If you'd like to help with translation, please see our [translation guide](contribution_guides/translation.md).
|
||||
|
||||
## Tutorials
|
||||
|
||||
Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
|
||||
|
||||
## Contributors
|
||||
|
||||
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
|
||||
|
||||
By making a contribution to this project, you certify that:
|
||||
|
||||
1. The contribution was created in whole or in part by you and you have the right to submit it under the open-source license indicated in this project’s GitHub repository; or
|
||||
2. The contribution is based upon previous work that, to the best of your knowledge, is covered under an appropriate open-source license and you have the right under that license to submit that work with modifications, whether created in whole or in part by you, under the same open-source license (unless you are permitted to submit under a different license); or
|
||||
3. The contribution was provided directly to you by some other person who certified (1) or (2) and you have not modified it; or
|
||||
4. You understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information you submit with it, including your sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open-source license(s) involved.
|
||||
|
||||
This disclaimer is not a license and does not grant any rights or permissions. You must obtain necessary permissions and licenses, including from third parties, before contributing to this project.
|
||||
|
||||
This disclaimer is provided "as is" without warranty of any kind, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, or non-infringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the contribution or the use or other dealings in the contribution.
|
||||
107
docs-old/faq.md
@@ -1,107 +0,0 @@
|
||||
# FAQ
|
||||
|
||||
If the troubleshooting steps on this page don't get you up and running, please either [create an issue] or hop on [discord] for help.
|
||||
|
||||
## How to Install
|
||||
|
||||
Follow the [Quick Start guide](./installation/quick_start.md) to install Invoke.
|
||||
|
||||
## Downloading models and using existing models
|
||||
|
||||
The Model Manager tab in the UI provides a few ways to install models, including using your already-downloaded models. You'll see a popup directing you there on first startup. For more information, see the [model install docs].
|
||||
|
||||
## Missing models after updating from v3
|
||||
|
||||
If you find some models are missing after updating from v3, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
|
||||
|
||||
You can use the `Scan Folder` tab in the Model Manager UI to fix this. The models will either be in the old, now-unused `autoimport` folder, or your `models` folder.
|
||||
|
||||
- Find and copy your install's old `autoimport` folder path, inside the main install folder.
|
||||
- Go to the Model Manager and click `Scan Folder`.
|
||||
- Paste the path and scan.
|
||||
- IMPORTANT: Uncheck `Inplace install`.
|
||||
- Click `Install All` to install all found models, or just install the models you want.
|
||||
|
||||
Next, find and copy your install's `models` folder path (this could be your custom models folder path, or the `models` folder inside the main install folder).
|
||||
|
||||
Follow the same steps to scan and import the missing models.
|
||||
|
||||
## Slow generation
|
||||
|
||||
- Check the [system requirements] to ensure that your system is capable of generating images.
|
||||
- Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
|
||||
- Check that your generations are happening on your GPU (if you have one). Invoke will log what is being used for generation upon startup. If your GPU isn't used, re-install and ensure you select the appropriate GPU option.
|
||||
- If you are on Windows with an Nvidia GPU, you may have exceeded your GPU's VRAM capacity and are triggering Nvidia's "sysmem fallback". There's a guide to opt out of this behaviour in the [Low-VRAM mode guide](./features/low-vram.md).
|
||||
|
||||
## Triton error on startup
|
||||
|
||||
This can be safely ignored. Invoke doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
|
||||
|
||||
## Unable to Copy on Firefox
|
||||
|
||||
Firefox does not allow Invoke to directly access the clipboard by default. As a result, you may be unable to use certain copy functions. You can fix this by configuring Firefox to allow access to write to the clipboard:
|
||||
|
||||
- Go to `about:config` and click the Accept button
|
||||
- Search for `dom.events.asyncClipboard.clipboardItem`
|
||||
- Set it to `true` by clicking the toggle button
|
||||
- Restart Firefox
|
||||
|
||||
## Replicate image found online
|
||||
|
||||
Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.
|
||||
|
||||
## Invalid configuration file
|
||||
|
||||
Everything seems to install ok, you get a `ValidationError` when starting up the app.
|
||||
|
||||
This is caused by an invalid setting in the `invokeai.yaml` configuration file. The error message should tell you what is wrong.
|
||||
|
||||
Check the [configuration docs] for more detail about the settings and how to specify them.
|
||||
|
||||
## Out of Memory Errors
|
||||
|
||||
The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Follow our [Low-VRAM mode guide](./features/low-vram.md) to configure Invoke to prevent these.
|
||||
|
||||
## Memory Leak (Linux)
|
||||
|
||||
If you notice a memory leak, it could be caused by memory fragmentation as models are loaded and/or moved from CPU to GPU.
|
||||
|
||||
A workaround is to tune memory allocation with an environment variable:
|
||||
|
||||
```bash
|
||||
# Force blocks >1MB to be allocated with `mmap` so that they are released to the system immediately when they are freed.
|
||||
MALLOC_MMAP_THRESHOLD_=1048576
|
||||
```
|
||||
|
||||
!!! warning "Speed vs Memory Tradeoff"
|
||||
|
||||
Your generations may be slower overall when setting this environment variable.
|
||||
|
||||
!!! info "Possibly dependent on `libc` implementation"
|
||||
|
||||
It's not known if this issue occurs with other `libc` implementations such as `musl`.
|
||||
|
||||
If you encounter this issue and your system uses a different implementation, please try this environment variable and let us know if it fixes the issue.
|
||||
|
||||
<h3>Detailed Discussion</h3>
|
||||
|
||||
Python (and PyTorch) relies on the memory allocator from the C Standard Library (`libc`). On linux, with the GNU C Standard Library implementation (`glibc`), our memory access patterns have been observed to cause severe memory fragmentation.
|
||||
|
||||
This fragmentation results in large amounts of memory that has been freed but can't be released back to the OS. Loading models from disk and moving them between CPU/CUDA seem to be the operations that contribute most to the fragmentation.
|
||||
|
||||
This memory fragmentation issue can result in OOM crashes during frequent model switching, even if `ram` (the max RAM cache size) is set to a reasonable value (e.g. a OOM crash with `ram=16` on a system with 32GB of RAM).
|
||||
|
||||
This problem may also exist on other OSes, and other `libc` implementations. But, at the time of writing, it has only been investigated on linux with `glibc`.
|
||||
|
||||
To better understand how the `glibc` memory allocator works, see these references:
|
||||
|
||||
- Basics: <https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html>
|
||||
- Details: <https://sourceware.org/glibc/wiki/MallocInternals>
|
||||
|
||||
Note the differences between memory allocated as chunks in an arena vs. memory allocated with `mmap`. Under `glibc`'s default configuration, most model tensors get allocated as chunks in an arena making them vulnerable to the problem of fragmentation.
|
||||
|
||||
[model install docs]: ./installation/models.md
|
||||
[system requirements]: ./installation/requirements.md
|
||||
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||
[configuration docs]: ./configuration.md
|
||||
@@ -1,32 +0,0 @@
|
||||
Lasso Tool
|
||||
===========
|
||||
|
||||
- The Lasso tool creates selections and inpaint masks by drawing freehand or polygonal regions on the canvas.
|
||||
|
||||
How to open the Lasso tool
|
||||
--------------------------
|
||||
- Click the Lasso icon in the toolbar.
|
||||
- Hotkey: press `L` (default). The hotkey is shown in the tool's tooltip and can be customized in Hotkeys settings.
|
||||
|
||||
Modes
|
||||
-----
|
||||
- Freehand (default)
|
||||
- Hold the pointer and drag to draw a continuous contour.
|
||||
- Long segments are broken into intermediate points to keep the line continuous.
|
||||
- Very long strokes may be simplified after drawing to reduce point count for performance.
|
||||
|
||||
- Polygon
|
||||
- Click to place points; click the first point (or a point near it) to close the polygon.
|
||||
- The tool snaps the closing point to the start for precise closures.
|
||||
|
||||
Basic interactions
|
||||
------------------
|
||||
- Switch modes with the mode toggle in the toolbar.
|
||||
- To close a polygon: click the starting point again or click near it — the tool aligns the final point to the start to complete the shape.
|
||||
- The selection will be added to the current Inpaint Mask layer. If no Inpaint Mask layer exists, a new one will be created automatically.
|
||||
|
||||
Tips & behavior
|
||||
---------------
|
||||
- Hold `Space` to temporarily switch to the View tool for panning and zooming; release `Space` to return to the Lasso tool and continue drawing.
|
||||
- When using the Polygon mode, you can hold `Shift` to snap points to horizontal, vertical, or 45-degree angles for more precise shapes.
|
||||
- Hold `Ctrl` (Windows/Linux) or `Command` (macOS) while drawing to subtract from the current selection instead of adding to it.
|
||||
@@ -1,56 +0,0 @@
|
||||
---
|
||||
title: Canvas Projects
|
||||
---
|
||||
|
||||
# :material-folder-zip: Canvas Projects
|
||||
|
||||
## Save and Restore Your Canvas Work
|
||||
|
||||
Canvas Projects let you save your entire canvas setup to a file and load it back later. This is useful when you want to:
|
||||
|
||||
- **Switch between tasks** without losing your current canvas arrangement
|
||||
- **Back up complex setups** with multiple layers, masks, and reference images
|
||||
- **Share canvas layouts** with others or transfer them between machines
|
||||
- **Recover from deleted images** — all images are embedded in the project file
|
||||
|
||||
## What Gets Saved
|
||||
|
||||
A canvas project file (`.invk`) captures everything about your current canvas session:
|
||||
|
||||
- **All layers** — raster layers, control layers, inpaint masks, regional guidance
|
||||
- **All drawn content** — brush strokes, pasted images, eraser marks
|
||||
- **Reference images** — global IP-Adapter / FLUX Redux images with crop settings
|
||||
- **Regional guidance** — per-region prompts and reference images
|
||||
- **Bounding box** — position, size, aspect ratio, and scale settings
|
||||
- **All generation parameters** — prompts, seed, steps, CFG scale, guidance, scheduler, model, VAE, dimensions, img2img strength, infill settings, canvas coherence, refiner settings, FLUX/Z-Image specific parameters, and more
|
||||
- **LoRAs** — all added LoRA models with their weights and enabled/disabled state
|
||||
|
||||
## How to Save a Project
|
||||
|
||||
You can save from two places:
|
||||
|
||||
1. **Toolbar** — Click the **Archive icon** in the canvas toolbar, then select **Save Canvas Project**
|
||||
2. **Context menu** — Right-click the canvas, open the **Project** submenu, then select **Save Canvas Project**
|
||||
|
||||
A dialog will ask you to enter a **project name**. This name is used as the filename (e.g., entering "My Portrait" saves as `My Portrait.invk`) and is stored inside the project file.
|
||||
|
||||
## How to Load a Project
|
||||
|
||||
1. **Toolbar** — Click the **Archive icon**, then select **Load Canvas Project**
|
||||
2. **Context menu** — Right-click the canvas, open the **Project** submenu, then select **Load Canvas Project**
|
||||
|
||||
A file dialog will open. Select your `.invk` file. You will see a confirmation dialog warning that loading will replace your current canvas. Click **Load** to proceed.
|
||||
|
||||
### What Happens on Load
|
||||
|
||||
- Your current canvas is **completely replaced** — all existing layers, masks, reference images, and parameters are overwritten
|
||||
- Images that are already present on your InvokeAI server are reused automatically (no duplicate uploads)
|
||||
- Images that were deleted from the server are re-uploaded from the project file
|
||||
- If the saved model is not installed on your system, the model identifier is still restored — you will need to select an available model manually
|
||||
|
||||
## Good to Know
|
||||
|
||||
- **No undo** — Loading a project replaces your canvas entirely. There is no way to undo this action, so save your current project first if you want to keep it.
|
||||
- **Image deduplication** — When loading, images already on your server are not re-uploaded. Only missing images are uploaded from the project file.
|
||||
- **File size** — The `.invk` file size depends on the number and resolution of images in your canvas. A project with many high-resolution layers can be large.
|
||||
- **Model availability** — The project saves which model was selected, but does not include the model itself. If the model is not installed when you load the project, you will need to select a different one.
|
||||
|
Before Width: | Height: | Size: 72 KiB |
@@ -1,80 +0,0 @@
|
||||
# Customizable Hotkeys
|
||||
|
||||
InvokeAI allows you to customize all keyboard shortcuts (hotkeys) to match your workflow preferences.
|
||||
|
||||
## Features
|
||||
|
||||
- **View All Hotkeys**: See all available keyboard shortcuts in one place
|
||||
- **Customize Any Hotkey**: Change any shortcut to your preference
|
||||
- **Multiple Bindings**: Assign multiple key combinations to the same action
|
||||
- **Smart Validation**: Built-in validation prevents invalid combinations
|
||||
- **Persistent Settings**: Your custom hotkeys are saved and restored across sessions
|
||||
- **Easy Reset**: Reset individual hotkeys or all hotkeys back to defaults
|
||||
|
||||
## How to Use
|
||||
|
||||
### Opening the Hotkeys Modal
|
||||
|
||||
Press `Shift+?` or click the keyboard icon in the application to open the Hotkeys Modal.
|
||||
|
||||
### Viewing Hotkeys
|
||||
|
||||
In **View Mode** (default), you can:
|
||||
- Browse all available hotkeys organized by category (App, Canvas, Gallery, Workflows, etc.)
|
||||
- Search for specific hotkeys using the search bar
|
||||
- See the current key combination for each action
|
||||
|
||||
### Customizing Hotkeys
|
||||
|
||||
1. Click the **Edit Mode** button at the bottom of the Hotkeys Modal
|
||||
2. Find the hotkey you want to change
|
||||
3. Click the **pencil icon** next to it
|
||||
4. The editor will appear with:
|
||||
- **Input field**: Enter your new hotkey combination
|
||||
- **Modifier buttons**: Quick-insert Mod, Ctrl, Shift, Alt keys
|
||||
- **Help icon** (?): Shows syntax examples and valid keys
|
||||
- **Live preview**: See how your hotkey will look
|
||||
|
||||
5. Enter your new hotkey using the format:
|
||||
- `mod+a` - Mod key + A (Mod = Ctrl on Windows/Linux, Cmd on Mac)
|
||||
- `ctrl+shift+k` - Multiple modifiers
|
||||
- `f1` - Function keys
|
||||
- `mod+enter, ctrl+enter` - Multiple alternatives (separated by comma)
|
||||
|
||||
6. Click the **checkmark** or press Enter to save
|
||||
7. Click the **X** or press Escape to cancel
|
||||
|
||||
### Resetting Hotkeys
|
||||
|
||||
**Reset a single hotkey:**
|
||||
- Click the counter-clockwise arrow icon that appears next to customized hotkeys
|
||||
|
||||
**Reset all hotkeys:**
|
||||
- In Edit Mode, click the **Reset All to Default** button at the bottom
|
||||
|
||||
### Hotkey Format Reference
|
||||
|
||||
**Valid Modifiers:**
|
||||
- `mod` - Context-aware: Ctrl (Windows/Linux) or Cmd (Mac)
|
||||
- `ctrl` - Control key
|
||||
- `shift` - Shift key
|
||||
- `alt` - Alt key (Option on Mac)
|
||||
|
||||
**Valid Keys:**
|
||||
- Letters: `a-z`
|
||||
- Numbers: `0-9`
|
||||
- Function keys: `f1-f12`
|
||||
- Special keys: `enter`, `space`, `tab`, `backspace`, `delete`, `escape`
|
||||
- Arrow keys: `up`, `down`, `left`, `right`
|
||||
- And more...
|
||||
|
||||
**Examples:**
|
||||
- ✅ `mod+s` - Save action
|
||||
- ✅ `ctrl+shift+p` - Command palette
|
||||
- ✅ `f5, mod+r` - Two alternatives for refresh
|
||||
- ❌ `mod+` - Invalid (no key after modifier)
|
||||
- ❌ `shift+ctrl+` - Invalid (ends with modifier)
|
||||
|
||||
## For Developers
|
||||
|
||||
For technical implementation details, architecture, and how to add new hotkeys to the system, see the [Hotkeys Developer Documentation](../contributing/HOTKEYS.md).
|
||||
@@ -1,176 +0,0 @@
|
||||
---
|
||||
title: Low-VRAM mode
|
||||
---
|
||||
|
||||
As of v5.6.0, Invoke has a low-VRAM mode. It works on systems with dedicated GPUs (Nvidia GPUs on Windows/Linux and AMD GPUs on Linux).
|
||||
|
||||
This allows you to generate even if your GPU doesn't have enough VRAM to hold full models. Most users should be able to run even the beefiest models - like the ~24GB unquantised FLUX dev model.
|
||||
|
||||
## Enabling Low-VRAM mode
|
||||
|
||||
To enable Low-VRAM mode, add this line to your `invokeai.yaml` configuration file, then restart Invoke:
|
||||
|
||||
```yaml
|
||||
enable_partial_loading: true
|
||||
```
|
||||
|
||||
**Windows users should also [disable the Nvidia sysmem fallback](#disabling-nvidia-sysmem-fallback-windows-only)**.
|
||||
|
||||
It is possible to fine-tune the settings for best performance or if you still get out-of-memory errors (OOMs).
|
||||
|
||||
!!! tip "How to find `invokeai.yaml`"
|
||||
|
||||
The `invokeai.yaml` configuration file lives in your install directory. To access it, run the **Invoke Community Edition** launcher and click the install location. This will open your install directory in a file explorer window.
|
||||
|
||||
You'll see `invokeai.yaml` there and can edit it with any text editor. After making changes, restart Invoke.
|
||||
|
||||
If you don't see `invokeai.yaml`, launch Invoke once. It will create the file on its first startup.
|
||||
|
||||
## Details and fine-tuning
|
||||
|
||||
Low-VRAM mode involves 5 features, each of which can be configured or fine-tuned:
|
||||
|
||||
- Partial model loading (`enable_partial_loading`)
|
||||
- PyTorch CUDA allocator config (`pytorch_cuda_alloc_conf`)
|
||||
- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
|
||||
- Working memory (`device_working_mem_gb`)
|
||||
- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)
|
||||
|
||||
Read on to learn about these features and understand how to fine-tune them for your system and use-cases.
|
||||
|
||||
### Partial model loading
|
||||
|
||||
Invoke's partial model loading works by streaming model "layers" between RAM and VRAM as they are needed.
|
||||
|
||||
When an operation needs layers that are not in VRAM, but there isn't enough room to load them, inactive layers are offloaded to RAM to make room.
|
||||
|
||||
#### Enabling partial model loading
|
||||
|
||||
As described above, you can enable partial model loading by adding this line to `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
enable_partial_loading: true
|
||||
```
|
||||
|
||||
### PyTorch CUDA allocator config
|
||||
|
||||
The PyTorch CUDA allocator's behavior can be configured using the `pytorch_cuda_alloc_conf` config. Tuning the allocator configuration can help to reduce the peak reserved VRAM. The optimal configuration is dependent on many factors (e.g. device type, VRAM, CUDA driver version, etc.), but switching from PyTorch's native allocator to using CUDA's built-in allocator works well on many systems. To try this, add the following line to your `invokeai.yaml` file:
|
||||
|
||||
```yaml
|
||||
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
|
||||
```
|
||||
|
||||
A more complete explanation of the available configuration options is [here](https://pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf).
|
||||
|
||||
### Dynamic RAM and VRAM cache sizes
|
||||
|
||||
Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum.
|
||||
|
||||
By default, Invoke manages these caches' sizes dynamically for best performance.
|
||||
|
||||
#### Fine-tuning cache sizes
|
||||
|
||||
Prior to v5.6.0, the cache sizes were static, and for best performance, many users needed to manually fine-tune the `ram` and `vram` settings in `invokeai.yaml`.
|
||||
|
||||
As of v5.6.0, the caches are dynamically sized. The `ram` and `vram` settings are no longer used, and new settings are added to configure the cache.
|
||||
|
||||
**Most users will not need to fine-tune the cache sizes.**
|
||||
|
||||
But, if your GPU has enough VRAM to hold models fully, you might get a perf boost by manually setting the cache sizes in `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
# The default max cache RAM size is logged on InvokeAI startup. It is determined based on your system RAM / VRAM.
|
||||
# You can override the default value by setting `max_cache_ram_gb`.
|
||||
# Increasing `max_cache_ram_gb` will increase the amount of RAM used to cache inactive models, resulting in faster model
|
||||
# reloads for the cached models.
|
||||
# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB
|
||||
# might be a good value to achieve aggressive model caching.
|
||||
max_cache_ram_gb: 28
|
||||
|
||||
# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
|
||||
# consideration the VRAM used by other processes).
|
||||
# You can override the default value by setting `max_cache_vram_gb`.
|
||||
# CAUTION: Most users should not manually set this value. See warning below.
|
||||
max_cache_vram_gb: 16
|
||||
```
|
||||
|
||||
!!! warning "Max safe value for `max_cache_vram_gb`"
|
||||
|
||||
Most users should not manually configure the `max_cache_vram_gb`. This configuration value takes precedence over the `device_working_mem_gb` and any operations that explicitly reserve additional working memory (e.g. VAE decode). As such, manually configuring it increases the likelihood of encountering out-of-memory errors.
|
||||
|
||||
For users who wish to configure `max_cache_vram_gb`, the max safe value can be determined by subtracting `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.
|
||||
|
||||
For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.
|
||||
|
||||
If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.
|
||||
|
||||
Most users who override `max_cache_vram_gb` are doing so because they wish to use significantly less VRAM, and should be setting `max_cache_vram_gb` to a value significantly less than the 'max safe value'.
|
||||
|
||||
### Working memory
|
||||
|
||||
Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.
|
||||
|
||||
Invoke reserves 3GB VRAM as working memory by default, which is enough for most use-cases. However, it is possible to fine-tune this setting if you still get OOMs.
|
||||
|
||||
#### Fine-tuning working memory
|
||||
|
||||
You can increase the working memory size in `invokeai.yaml` to prevent OOMs:
|
||||
|
||||
```yaml
|
||||
# The default is 3GB - bump it up to 4GB to prevent OOMs.
|
||||
device_working_mem_gb: 4
|
||||
```
|
||||
|
||||
!!! tip "Operations may request more working memory"
|
||||
|
||||
For some operations, we can determine VRAM requirements in advance and allocate additional working memory to prevent OOMs.
|
||||
|
||||
VAE decoding is one such operation. This operation converts the generation process's output into an image. For large image outputs, this might use more than the default working memory size of 3GB.
|
||||
|
||||
During this decoding step, Invoke calculates how much VRAM will be required to decode and requests that much VRAM from the model manager. If the amount exceeds the working memory size, the model manager will offload cached model layers from VRAM until there's enough VRAM to decode.
|
||||
|
||||
Once decoding completes, the model manager "reclaims" the extra VRAM allocated as working memory for future model loading operations.
|
||||
|
||||
### Keeping a RAM weight copy
|
||||
|
||||
Invoke has the option of keeping a RAM copy of all model weights, even when they are loaded onto the GPU. This optimization is _on_ by default, and enables faster model switching and LoRA patching. Disabling this feature will reduce the average RAM load while running Invoke (peak RAM likely won't change), at the cost of slower model switching and LoRA patching. If you have limited RAM, you can disable this optimization:
|
||||
|
||||
```yaml
|
||||
# Set to false to reduce the average RAM usage at the cost of slower model switching and LoRA patching.
|
||||
keep_ram_copy_of_weights: false
|
||||
```
|
||||
|
||||
### Disabling Nvidia sysmem fallback (Windows only)
|
||||
|
||||
On Windows, Nvidia GPUs are able to use system RAM when their VRAM fills up via **sysmem fallback**. While it sounds like a good idea on the surface, in practice it causes massive slowdowns during generation.
|
||||
|
||||
It is strongly suggested to disable this feature:
|
||||
|
||||
- Open the **NVIDIA Control Panel** app.
|
||||
- Expand **3D Settings** on the left panel.
|
||||
- Click **Manage 3D Settings** in the left panel.
|
||||
- Find **CUDA - Sysmem Fallback Policy** in the right panel and set it to **Prefer No Sysmem Fallback**.
|
||||
|
||||

|
||||
|
||||
!!! tip "Invoke does the same thing, but better"
|
||||
|
||||
If the sysmem fallback feature sounds familiar, that's because Invoke's partial model loading strategy is conceptually very similar - use VRAM when there's room, else fall back to RAM.
|
||||
|
||||
Unfortunately, the Nvidia implementation is not optimized for applications like Invoke and does more harm than good.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Windows page file
|
||||
|
||||
Invoke has high virtual memory (a.k.a. 'committed memory') requirements. This can cause issues on Windows if the page file size limits are hit. (See this issue for the technical details on why this happens: https://github.com/invoke-ai/InvokeAI/issues/7563).
|
||||
|
||||
If you run out of page file space, InvokeAI may crash. Often, these crashes will happen with one of the following errors:
|
||||
|
||||
- InvokeAI exits with Windows error code `3221225477`
|
||||
- InvokeAI crashes without an error, but `eventvwr.msc` reveals an error with code `0xc0000005` (the hex equivalent of `3221225477`)
|
||||
|
||||
If you are running out of page file space, try the following solutions:
|
||||
|
||||
- Make sure that you have sufficient disk space for the page file to grow. Watch your disk usage as Invoke runs. If it climbs near 100% leading up to the crash, then this is very likely the source of the issue. Clear out some disk space to resolve the issue.
|
||||
- Make sure that your page file is set to "System managed size" (this is the default) rather than a custom size. Under the "System managed size" policy, the page file will grow dynamically as needed.
|
||||
@@ -1,152 +0,0 @@
|
||||
# Orphaned Models Synchronization Feature
|
||||
|
||||
## Overview
|
||||
This feature adds a UI for synchronizing the models directory by finding and removing orphaned model files. Orphaned models are directories that contain model files but are not referenced in the InvokeAI database.
|
||||
|
||||
## Implementation Summary
|
||||
|
||||
### Backend (Python)
|
||||
|
||||
#### New Service: `OrphanedModelsService`
|
||||
- Location: `invokeai/app/services/orphaned_models/`
|
||||
- Implements the core logic from the CLI script
|
||||
- Methods:
|
||||
- `find_orphaned_models()`: Scans the models directory and database to find orphaned models
|
||||
- `delete_orphaned_models(paths)`: Safely deletes specified orphaned model directories
|
||||
|
||||
#### API Routes
|
||||
Added to `invokeai/app/api/routers/model_manager.py`:
|
||||
- `GET /api/v2/models/sync/orphaned`: Returns list of orphaned models with metadata
|
||||
- `DELETE /api/v2/models/sync/orphaned`: Deletes selected orphaned models
|
||||
|
||||
#### Data Models
|
||||
- `OrphanedModelInfo`: Contains path, absolute_path, files list, and size_bytes
|
||||
- `DeleteOrphanedModelsRequest`: Contains list of paths to delete
|
||||
- `DeleteOrphanedModelsResponse`: Contains deleted paths and errors
|
||||
|
||||
### Frontend (TypeScript/React)
|
||||
|
||||
#### New Components
|
||||
|
||||
1. **SyncModelsButton.tsx**
|
||||
- Red button styled with `colorScheme="error"` for visual prominence
|
||||
- Labeled "Sync Models"
|
||||
- Opens the SyncModelsDialog when clicked
|
||||
- Located next to the "+ Add Models" button
|
||||
|
||||
2. **SyncModelsDialog.tsx**
|
||||
- Modal dialog that displays orphaned models
|
||||
- Features:
|
||||
- List of orphaned models with checkboxes (default: all checked)
|
||||
- "Select All" / "Deselect All" toggle
|
||||
- Shows file count and total size for each model
|
||||
- "Delete" and "Cancel" buttons
|
||||
- Loading spinner while fetching data
|
||||
- Error handling with user-friendly messages
|
||||
- Automatically shows toast if no orphaned models found
|
||||
- Shows success/error toasts after deletion
|
||||
|
||||
#### API Integration
|
||||
- Added `useGetOrphanedModelsQuery` and `useDeleteOrphanedModelsMutation` hooks to `services/api/endpoints/models.ts`
|
||||
- Integrated with RTK Query for efficient data fetching and caching
|
||||
|
||||
#### Translation Strings
|
||||
Added to `public/locales/en.json`:
|
||||
- syncModels, noOrphanedModels, orphanedModelsFound
|
||||
- orphanedModelsDescription, foundOrphanedModels (with pluralization)
|
||||
- filesCount, deleteSelected, deselectAll
|
||||
- Success/error messages for deletion operations
|
||||
|
||||
## User Experience Flow
|
||||
|
||||
1. User clicks the red "Sync Models" button in the Model Manager
|
||||
2. System queries the backend for orphaned models
|
||||
3. If no orphaned models:
|
||||
- Toast message: "The models directory is synchronized. No orphaned files found."
|
||||
- Dialog closes automatically
|
||||
4. If orphaned models found:
|
||||
- Dialog shows list with checkboxes (all selected by default)
|
||||
- User can toggle individual models or use "Select All" / "Deselect All"
|
||||
- Each model shows:
|
||||
- Directory path
|
||||
- File count
|
||||
- Total size (formatted: B, KB, MB, GB)
|
||||
5. User clicks "Delete {{count}} selected"
|
||||
6. System deletes selected models
|
||||
7. Success/error toasts appear
|
||||
8. Dialog closes
|
||||
|
||||
## Safety Features
|
||||
|
||||
1. **Database Backup**: The service creates a backup before any deletion
|
||||
2. **Selective Deletion**: Users choose which models to delete
|
||||
3. **Path Validation**: Ensures paths are within the models directory
|
||||
4. **Error Handling**: Reports which models failed to delete and why
|
||||
5. **Default Selected**: All models are selected by default for convenience
|
||||
6. **Confirmation Required**: User must explicitly click Delete
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Directory-Based Detection
|
||||
The system treats model paths as directories:
|
||||
- If database has `model-id/file.safetensors`, the entire `model-id/` directory belongs to that model
|
||||
- All files and subdirectories within a registered model directory are protected
|
||||
- Only directories with NO registered models are flagged as orphaned
|
||||
|
||||
### Supported File Extensions
|
||||
- .safetensors
|
||||
- .ckpt
|
||||
- .pt
|
||||
- .pth
|
||||
- .bin
|
||||
- .onnx
|
||||
|
||||
### Skipped Directories
|
||||
- .download_cache
|
||||
- .convert_cache
|
||||
- \_\_pycache\_\_
|
||||
- .git
|
||||
|
||||
## Testing Recommendations
|
||||
|
||||
1. **Test with orphaned models**:
|
||||
- Manually copy a model directory to models folder
|
||||
- Verify it appears in the dialog
|
||||
- Delete it and verify removal
|
||||
|
||||
2. **Test with no orphaned models**:
|
||||
- Clean install
|
||||
- Verify toast message appears
|
||||
|
||||
3. **Test partial selection**:
|
||||
- Select only some models
|
||||
- Verify only selected ones are deleted
|
||||
|
||||
4. **Test error scenarios**:
|
||||
- Invalid paths
|
||||
- Permission issues
|
||||
- Verify error messages are clear
|
||||
|
||||
## Files Changed
|
||||
|
||||
### Backend
|
||||
- `invokeai/app/services/orphaned_models/__init__.py` (new)
|
||||
- `invokeai/app/services/orphaned_models/orphaned_models_service.py` (new)
|
||||
- `invokeai/app/api/routers/model_manager.py` (modified)
|
||||
|
||||
### Frontend
|
||||
- `invokeai/frontend/web/src/services/api/endpoints/models.ts` (modified)
|
||||
- `invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManager.tsx` (modified)
|
||||
- `invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/SyncModelsButton.tsx` (new)
|
||||
- `invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/SyncModelsDialog.tsx` (new)
|
||||
- `invokeai/frontend/web/public/locales/en.json` (modified)
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Potential improvements for future versions:
|
||||
1. Show preview of what will be deleted before deletion
|
||||
2. Add option to move orphaned models to archive instead of deleting
|
||||
3. Show disk space that will be freed
|
||||
4. Add filter/search in orphaned models list
|
||||
5. Support for undo operation
|
||||
6. Scheduled automatic cleanup
|
||||
@@ -1,149 +0,0 @@
|
||||
# Manual Install
|
||||
|
||||
!!! warning
|
||||
|
||||
**Python experience is mandatory.**
|
||||
|
||||
If you want to use Invoke locally, you should probably use the [launcher](./quick_start.md).
|
||||
|
||||
If you want to contribute to Invoke or run the app on the latest dev branch, instead follow the [dev environment](../contributing/dev-environment.md) guide.
|
||||
|
||||
InvokeAI is distributed as a python package on PyPI, installable with `pip`. There are a few things that are handled by the launcher that you'll need to manage manually, described in this guide.
|
||||
|
||||
## Requirements
|
||||
|
||||
Before you start, go through the [installation requirements](./requirements.md).
|
||||
|
||||
## Walkthrough
|
||||
|
||||
We'll use [`uv`](https://github.com/astral-sh/uv) to install python and create a virtual environment, then install the `invokeai` package. `uv` is a modern, very fast alternative to `pip`.
|
||||
|
||||
The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed.
|
||||
|
||||
1. Install `uv` as described in its [docs](https://docs.astral.sh/uv/getting-started/installation/#standalone-installer). We suggest using the standalone installer method.
|
||||
|
||||
Run `uv --version` to confirm that `uv` is installed and working. After installation, you may need to restart your terminal to get access to `uv`.
|
||||
|
||||
2. Create a directory for your installation, typically in your home directory (e.g. `~/invokeai` or `$Home/invokeai`):
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
mkdir ~/invokeai
|
||||
cd ~/invokeai
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```bash
|
||||
mkdir $Home/invokeai
|
||||
cd $Home/invokeai
|
||||
```
|
||||
|
||||
3. Create a virtual environment in that directory:
|
||||
|
||||
```sh
|
||||
uv venv --relocatable --prompt invoke --python 3.12 --python-preference only-managed .venv
|
||||
```
|
||||
|
||||
This command creates a portable virtual environment at `.venv` complete with a portable python 3.12. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything.
|
||||
|
||||
4. Activate the virtual environment:
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```ps
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
5. Choose a version to install. Review the [GitHub releases page](https://github.com/invoke-ai/InvokeAI/releases).
|
||||
|
||||
6. Determine the package specifier to use when installing. This is a performance optimization.
|
||||
|
||||
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
|
||||
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
|
||||
|
||||
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is achieved by using [UV's built-in torch support](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection).
|
||||
|
||||
=== "Invoke v5.12 and later"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
=== "Invoke v5.10.0 to v5.11.0"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
=== "Invoke v5.0.0 to v5.9.1"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
=== "Invoke v4"
|
||||
|
||||
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
|
||||
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
|
||||
- **In all other cases, do not use a torch backend.**
|
||||
|
||||
8. Install the `invokeai` package. Substitute the package specifier and version.
|
||||
|
||||
```sh
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
|
||||
```
|
||||
|
||||
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
|
||||
|
||||
```sh
|
||||
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<VERSION> --force-reinstall
|
||||
```
|
||||
|
||||
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
deactivate && source .venv/bin/activate
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```ps
|
||||
deactivate
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
10. Run the application, specifying the directory you created earlier as the root directory:
|
||||
|
||||
=== "Linux/macOS"
|
||||
|
||||
```bash
|
||||
invokeai-web --root ~/invokeai
|
||||
```
|
||||
|
||||
=== "Windows (PowerShell)"
|
||||
|
||||
```bash
|
||||
invokeai-web --root $Home/invokeai
|
||||
```
|
||||
|
||||
## Headless Install and Launch Scripts
|
||||
|
||||
If you run Invoke on a headless server, you might want to install and run Invoke on the command line.
|
||||
|
||||
We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](../installation/quick_start.md).
|
||||
|
||||
You can create your own scripts for this by copying the handful of commands in this guide. `uv`'s [`pip` interface docs](https://docs.astral.sh/uv/reference/cli/#uv-pip-install) may be useful.
|
||||
@@ -1,154 +0,0 @@
|
||||
# Invoke Community Edition Quick Start
|
||||
|
||||
Welcome to Invoke! Follow these steps to install, update, and get started creating.
|
||||
|
||||
## Step 1: System Requirements
|
||||
|
||||
Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
|
||||
|
||||
Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.
|
||||
|
||||
- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
|
||||
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.
|
||||
|
||||
!!! info "Hardware Requirements (Windows/Linux)"
|
||||
|
||||
=== "SD1.5 - 512×512"
|
||||
|
||||
- GPU: Nvidia 10xx series or later, 4GB+ VRAM.
|
||||
- Memory: At least 8GB RAM.
|
||||
- Disk: 10GB for base installation plus 30GB for models.
|
||||
|
||||
=== "SDXL - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 100GB for models.
|
||||
|
||||
=== "FLUX.1 - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 10GB+ VRAM.
|
||||
- Memory: At least 32GB RAM.
|
||||
- Disk: 10GB for base installation plus 200GB for models.
|
||||
|
||||
=== "FLUX.2 Klein - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 6GB+ VRAM for GGUF Q4 quantized models, 12GB+ for full precision.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 20GB for models.
|
||||
|
||||
=== "Z-Image Turbo - 1024×1024"
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 35GB for models.
|
||||
|
||||
|
||||
More detail on system requirements can be found [here](./requirements.md).
|
||||
|
||||
## Step 2: Download and Set Up the Launcher
|
||||
|
||||
The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher.
|
||||
|
||||
!!! info "Instructions for each OS"
|
||||
|
||||
=== "Windows"
|
||||
|
||||
- [Download for Windows](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe)
|
||||
- Run the `EXE` to install the Launcher and start it.
|
||||
- A desktop shortcut will be created; use this to run the Launcher in the future.
|
||||
- You can delete the `EXE` file you downloaded.
|
||||
|
||||
=== "macOS"
|
||||
|
||||
- [Download for macOS](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg)
|
||||
- Open the `DMG` and drag the app into `Applications`.
|
||||
- Run the Launcher using its entry in `Applications`.
|
||||
- You can delete the `DMG` file you downloaded.
|
||||
|
||||
=== "Linux"
|
||||
|
||||
- [Download for Linux](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage)
|
||||
- You may need to edit the `AppImage` file properties and make it executable.
|
||||
- Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it.
|
||||
- Run the Launcher by double-clicking the `AppImage` or the shortcut you made.
|
||||
|
||||
## Step 3: Install Invoke
|
||||
|
||||
Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke.
|
||||
|
||||
If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.
|
||||
|
||||
!!! tip "Updating"
|
||||
|
||||
The Launcher will check for updates for itself _and_ Invoke.
|
||||
|
||||
- When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself.
|
||||
- When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke.
|
||||
|
||||
## Step 4: Launch
|
||||
|
||||
Once installed, click **Finish**, then **Launch** to start Invoke.
|
||||
|
||||
The very first run after an installation or update will take a few extra moments to get ready.
|
||||
|
||||
!!! tip "Server Mode"
|
||||
|
||||
The launcher runs Invoke as a desktop application. You can enable **Server Mode** in the launcher's settings to disable this and instead access the UI through your web browser.
|
||||
|
||||
## Step 5: Install Models
|
||||
|
||||
With Invoke started up, you'll need to install some models.
|
||||
|
||||
The quickest way to get started is to install a **Starter Model** bundle. If you already have a model collection, Invoke can use it.
|
||||
|
||||
!!! info "Install Models"
|
||||
|
||||
=== "Install a Starter Model bundle"
|
||||
|
||||
1. Go to the **Models** tab.
|
||||
2. Click **Starter Models** on the right.
|
||||
3. Click one of the bundles to install its models. Refer to the [system requirements](#step-1-system-requirements) if you're unsure which model architecture will work for your system.
|
||||
|
||||
=== "Use my model collection"
|
||||
|
||||
1. Go to the **Models** tab.
|
||||
2. Click **Scan Folder** on the right.
|
||||
3. Paste the path to your models collection and click **Scan Folder**.
|
||||
4. With **In-place install** enabled, Invoke will leave the model files where they are. If you disable this, **Invoke will move the models into its own folders**.
|
||||
|
||||
You’re now ready to start creating!
|
||||
|
||||
## Step 6: Learn the Basics
|
||||
|
||||
We recommend watching our [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO). It covers essential features and workflows, including:
|
||||
|
||||
- Generating your first image.
|
||||
- Using control layers and reference guides.
|
||||
- Refining images with advanced workflows.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If installation fails, retrying the install in Repair Mode may fix it. There's a checkbox to enable this on the Review step of the install flow.
|
||||
|
||||
If that doesn't fix it, [clearing the `uv` cache](https://docs.astral.sh/uv/reference/cli/#uv-cache-clean) might do the trick:
|
||||
|
||||
- Open and start the dev console (button at the bottom-left of the launcher).
|
||||
- Run `uv cache clean`.
|
||||
- Retry the installation. Enable Repair Mode for good measure.
|
||||
|
||||
If you are still unable to install, try installing to a different location and see if that works.
|
||||
|
||||
If you still have problems, ask for help on the Invoke [discord](https://discord.gg/ZmtBAhwWhy).
|
||||
|
||||
## Other Installation Methods
|
||||
|
||||
- You can install the Invoke application as a python package. See our [manual install](./manual.md) docs.
|
||||
- You can run Invoke with docker. See our [docker install](./docker.md) docs.
|
||||
|
||||
## Need Help?
|
||||
|
||||
- Visit our [Support Portal](https://support.invoke.ai).
|
||||
- Watch the [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO).
|
||||
- Join the conversation on [Discord][discord link].
|
||||
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
@@ -1,148 +0,0 @@
|
||||
# Requirements
|
||||
|
||||
Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
|
||||
|
||||
## Hardware
|
||||
|
||||
Hardware requirements vary significantly depending on model and image output size.
|
||||
|
||||
The requirements below are rough guidelines for best performance. GPUs
|
||||
with less VRAM typically still work, if a bit slower. Follow the
|
||||
[Low-VRAM mode guide](../features/low-vram.md) to optimize performance.
|
||||
|
||||
- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
|
||||
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.
|
||||
|
||||
!!! info "Hardware Requirements (Windows/Linux)"
|
||||
|
||||
=== "SD1.5 - 512×512"
|
||||
|
||||
- GPU: Nvidia 10xx series or later, 4GB+ VRAM.
|
||||
- Memory: At least 8GB RAM.
|
||||
- Disk: 10GB for base installation plus 30GB for models.
|
||||
|
||||
=== "SDXL - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 100GB for models.
|
||||
|
||||
=== "FLUX.1 - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 20xx series or later, 10GB+ VRAM.
|
||||
- Memory: At least 32GB RAM.
|
||||
- Disk: 10GB for base installation plus 200GB for models.
|
||||
|
||||
=== "FLUX.2 Klein 4B - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 30xx series or later, 12GB+ VRAM (e.g. RTX 3090, RTX 4070). FP8 version works with 8GB+ VRAM.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 20GB for models (Diffusers format with encoder).
|
||||
|
||||
=== "FLUX.2 Klein 9B - 1024×1024"
|
||||
|
||||
- GPU: Nvidia 40xx series, 24GB+ VRAM (e.g. RTX 4090). FP8 version works with 12GB+ VRAM.
|
||||
- Memory: At least 32GB RAM.
|
||||
- Disk: 10GB for base installation plus 40GB for models (Diffusers format with encoder).
|
||||
|
||||
=== "Z-Image Turbo - 1024×1024"
|
||||
- GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
|
||||
- Memory: At least 16GB RAM.
|
||||
- Disk: 10GB for base installation plus 35GB for models.
|
||||
|
||||
!!! info "`tmpfs` on Linux"
|
||||
|
||||
If your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
|
||||
|
||||
## Python
|
||||
|
||||
!!! tip "The launcher installs python for you"
|
||||
|
||||
You don't need to do this if you are installing with the [Invoke Launcher](./quick_start.md).
|
||||
|
||||
Invoke requires python 3.11 through 3.12. If you don't already have one of these versions installed, we suggest installing 3.12, as it will be supported for longer.
|
||||
|
||||
Check that your system has an up-to-date Python installed by running `python3 --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).
|
||||
|
||||
!!! info "Installing Python"
|
||||
|
||||
=== "Windows"
|
||||
|
||||
- Install python with [an official installer].
|
||||
- The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
|
||||
- You may need to install [Microsoft Visual C++ Redistributable].
|
||||
|
||||
=== "macOS"
|
||||
|
||||
- Install python with [an official installer].
|
||||
- If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.11/Install\ Certificates.command`
|
||||
- If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.
|
||||
|
||||
=== "Linux"
|
||||
|
||||
- Installing python varies depending on your system. We recommend [using `uv` to manage your python installation](https://docs.astral.sh/uv/concepts/python-versions/#installing-a-python-version).
|
||||
- You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
||||
|
||||
## Drivers
|
||||
|
||||
If you have an Nvidia or AMD GPU, you may need to manually install drivers or other support packages for things to work well or at all.
|
||||
|
||||
### Nvidia
|
||||
|
||||
Run `nvidia-smi` on your system's command line to verify that drivers and CUDA are installed. If this command fails, or doesn't report versions, you will need to install drivers.
|
||||
|
||||
Go to the [CUDA Toolkit Downloads] and carefully follow the instructions for your system to get everything installed.
|
||||
|
||||
Confirm that `nvidia-smi` displays driver and CUDA versions after installation.
|
||||
|
||||
#### Linux - via Nvidia Container Runtime
|
||||
|
||||
An alternative to installing CUDA locally is to use the [Nvidia Container Runtime] to run the application in a container.
|
||||
|
||||
#### Windows - Nvidia cuDNN DLLs
|
||||
|
||||
An out-of-date cuDNN library can greatly hamper performance on 30-series and 40-series cards. Check with the community on discord to compare your `it/s` if you think you may need this fix.
|
||||
|
||||
First, locate the destination for the DLL files and make a quick back up:
|
||||
|
||||
1. Find your InvokeAI installation folder, e.g. `C:\Users\Username\InvokeAI\`.
|
||||
1. Open the `.venv` folder, e.g. `C:\Users\Username\InvokeAI\.venv` (you may need to show hidden files to see it).
|
||||
1. Navigate deeper to the `torch` package, e.g. `C:\Users\Username\InvokeAI\.venv\Lib\site-packages\torch`.
|
||||
1. Copy the `lib` folder inside `torch` and back it up somewhere.
|
||||
|
||||
Next, download and copy the updated cuDNN DLLs:
|
||||
|
||||
1. Go to <https://developer.nvidia.com/cudnn>.
|
||||
1. Create an account if needed and log in.
|
||||
1. Choose the newest version of cuDNN that works with your GPU architecture. Consult the [cuDNN support matrix] to determine the correct version for your GPU.
|
||||
1. Download the latest version and extract it.
|
||||
1. Find the `bin` folder, e.g. `cudnn-windows-x86_64-SOME_VERSION\bin`.
|
||||
1. Copy and paste the `.dll` files into the `lib` folder you located earlier. Replace files when prompted.
|
||||
|
||||
If, after restarting the app, this doesn't improve your performance, either restore your back up or re-run the installer to reset `torch` back to its original state.
|
||||
|
||||
### AMD
|
||||
|
||||
!!! info "Linux Only"
|
||||
|
||||
AMD GPUs are supported on Linux only, due to ROCm (the AMD equivalent of CUDA) support being Linux only.
|
||||
|
||||
!!! warning "Bumps Ahead"
|
||||
|
||||
While the application does run on AMD GPUs, there are occasional bumps related to spotty torch support.
|
||||
|
||||
Run `rocm-smi` on your system's command line verify that drivers and ROCm are installed. If this command fails, or doesn't report versions, you will need to install them.
|
||||
|
||||
Go to the [ROCm Documentation] and carefully follow the instructions for your system to get everything installed.
|
||||
|
||||
Confirm that `rocm-smi` displays driver and CUDA versions after installation.
|
||||
|
||||
#### Linux - via Docker Container
|
||||
|
||||
An alternative to installing ROCm locally is to use a [ROCm docker container] to run the application in a container.
|
||||
|
||||
[ROCm docker container]: https://github.com/ROCm/ROCm-docker
|
||||
[ROCm Documentation]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html
|
||||
[cuDNN support matrix]: https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html
|
||||
[Nvidia Container Runtime]: https://developer.nvidia.com/container-runtime
|
||||
[CUDA Toolkit Downloads]: https://developer.nvidia.com/cuda-downloads
|
||||
@@ -1,876 +0,0 @@
|
||||
# InvokeAI Multi-User Administrator Guide
|
||||
|
||||
## Overview
|
||||
|
||||
This guide is for administrators managing a multi-user InvokeAI installation. It covers initial setup, user management, security best practices, and troubleshooting.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before enabling multi-user support, ensure you have:
|
||||
|
||||
- InvokeAI installed and running
|
||||
- Access to the server filesystem (for initial setup)
|
||||
- Understanding of your deployment environment
|
||||
- Backup of your existing data (recommended)
|
||||
|
||||
## Initial Setup
|
||||
|
||||
### Activating Multiuser Mode
|
||||
|
||||
To put InvokeAI into multiuser mode, you will need to add the option
|
||||
`multiuser: true` to its configuration file. This file is located at
|
||||
`INVOKEAI_ROOT/invokeai.yaml`. With the InvokeAI backend halted, add
|
||||
the new configuration option to the end of the file with a text editor
|
||||
so that it looks like this:
|
||||
|
||||
```yaml
|
||||
# Internal metadata - do not edit:
|
||||
schema_version: 4.0.2
|
||||
|
||||
# Enable/disable multi-user mode
|
||||
multiuser: true
|
||||
```
|
||||
|
||||
Then restart the InvokeAI server backend from the command line or
|
||||
using the launcher.
|
||||
|
||||
!!! note "Reverting to single-user mode"
|
||||
If at any time you wish to revert to single-user mode, simply comment
|
||||
out the `multiuser` line, or change "true" to "false". Then
|
||||
restart the server. Because of the way that browsers cache pages,
|
||||
users with open InvokeAI sessions may need to force-refresh their
|
||||
browsers.
|
||||
|
||||
|
||||
### First Administrator Account
|
||||
|
||||
When InvokeAI starts for the first time in multi-user mode, you'll see the **Administrator Setup** dialog.
|
||||
|
||||
**Setup Steps:**
|
||||
|
||||
1. **Email Address**: Enter a valid email address (this becomes your username)
|
||||
|
||||
* Example: `admin@example.com` or `admin@localhost` for testing
|
||||
* Must be a valid email format
|
||||
* Cannot be changed later without database access
|
||||
|
||||
2. **Display Name**: Enter a friendly name
|
||||
|
||||
* Example: "System Administrator" or your real name
|
||||
* Can be changed later in your profile
|
||||
* Visible to other users in shared contexts
|
||||
|
||||
3. **Password**: Create a strong administrator password
|
||||
|
||||
* **Minimum requirements:**
|
||||
|
||||
* At least 8 characters long
|
||||
* Contains uppercase letters (A-Z)
|
||||
* Contains lowercase letters (a-z)
|
||||
* Contains numbers (0-9)
|
||||
|
||||
* **Recommended:**
|
||||
|
||||
* Use 12+ characters
|
||||
* Include special characters (!@#$%^&*)
|
||||
* Use a password manager to generate and store
|
||||
* Don't reuse passwords from other services
|
||||
|
||||
4. **Confirm Password**: Re-enter the password
|
||||
|
||||
5. Click **Create Administrator Account**
|
||||
|
||||
!!! warning "Important"
|
||||
Store these credentials securely! The
|
||||
first administrator account can reset
|
||||
the password to something new, but cannot
|
||||
retrieve a lost one.
|
||||
|
||||
### Configuration
|
||||
|
||||
InvokeAI can run in single-user or multi-user mode, controlled by the `multiuser` configuration option in `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
# Enable/disable multi-user mode
|
||||
multiuser: true # Enable multi-user mode (requires authentication)
|
||||
# multiuser: false # Single-user mode (no authentication required)
|
||||
# If the multiuser option is absent, single-user mode is used
|
||||
|
||||
# Database configuration
|
||||
use_memory_db: false # Use persistent database
|
||||
db_path: databases/invokeai.db # Database location
|
||||
|
||||
# Session configuration (multi-user mode only)
|
||||
jwt_secret_key: "your-secret-key-here" # Auto-generated if not specified
|
||||
jwt_token_expiry_hours: 24 # Default session timeout
|
||||
jwt_remember_me_days: 7 # "Remember me" duration
|
||||
```
|
||||
|
||||
**Single-User Mode** (`multiuser: false` or option absent):
|
||||
- No authentication required
|
||||
- All functionality enabled by default
|
||||
- All boards and images visible in unified view
|
||||
- Ideal for personal use or trusted environments
|
||||
|
||||
**Multi-User Mode** (`multiuser: true`):
|
||||
- Authentication required for access
|
||||
- User isolation for boards, images, and workflows
|
||||
- Role-based permissions enforced
|
||||
- Ideal for shared servers or team environments
|
||||
|
||||
!!! warning "Mode Switching Behavior"
|
||||
**Switching to Single-User Mode:** If boards or images were created in multi-user mode, they will all be combined into a single unified view when switching to single-user mode.
|
||||
|
||||
**Switching to Multi-User Mode:** Legacy boards and images created under single-user mode will be owned by an internal user named "system." Only the Administrator will have access to these legacy assets. A utility to migrate these legacy assets to another user will be part of a future release.
|
||||
|
||||
### Migration from Single-User
|
||||
|
||||
When upgrading from a single-user installation or switching modes:
|
||||
|
||||
1. **Automatic Migration**: The database will automatically migrate to multi-user schema when multi-user mode is first enabled
|
||||
2. **Legacy Data Ownership**: Existing data (boards, images, workflows) created in single-user mode is assigned to an internal user named "system"
|
||||
3. **Administrator Access**: Only administrators will have access to legacy "system"-owned assets when in multi-user mode
|
||||
4. **No Data Loss**: All existing content is preserved
|
||||
|
||||
**Migration Process:**
|
||||
|
||||
```bash
|
||||
# Backup your database first
|
||||
cp databases/invokeai.db databases/invokeai.db.backup
|
||||
|
||||
# Enable multi-user mode in invokeai.yaml
|
||||
# multiuser: true
|
||||
|
||||
# Start InvokeAI (migration happens automatically)
|
||||
invokeai-web
|
||||
|
||||
# Complete the administrator setup dialog
|
||||
# Legacy data will be owned by "system" user
|
||||
```
|
||||
|
||||
!!! note "Legacy Asset Migration"
|
||||
A utility to migrate legacy "system"-owned assets to specific user accounts will be available in a future release. Until then, administrators can access and manage all legacy content.
|
||||
|
||||
## User Management
|
||||
|
||||
### Creating Users
|
||||
|
||||
**Via Web Interface (Coming Soon):**
|
||||
|
||||
!!! info "Web UI for User Management"
|
||||
A web-based user interface that allows administrators to manage users is coming in a future release. Until then, use the command-line scripts described below.
|
||||
|
||||
**Via Command Line Scripts:**
|
||||
|
||||
InvokeAI provides several command-line scripts in the `scripts/` directory for user management:
|
||||
|
||||
**useradd.py** - Add a new user:
|
||||
|
||||
```bash
|
||||
# Interactive mode (prompts for details)
|
||||
python scripts/useradd.py
|
||||
|
||||
# Create a regular user
|
||||
python scripts/useradd.py \
|
||||
--email user@example.com \
|
||||
--password TempPass123 \
|
||||
--name "User Name"
|
||||
|
||||
# Create an administrator
|
||||
python scripts/useradd.py \
|
||||
--email admin@example.com \
|
||||
--password AdminPass123 \
|
||||
--name "Admin Name" \
|
||||
--admin
|
||||
```
|
||||
|
||||
**userlist.py** - List all users:
|
||||
|
||||
```bash
|
||||
# List all users
|
||||
python scripts/userlist.py
|
||||
|
||||
# Show detailed information
|
||||
python scripts/userlist.py --verbose
|
||||
```
|
||||
|
||||
**usermod.py** - Modify an existing user:
|
||||
|
||||
```bash
|
||||
# Change display name
|
||||
python scripts/usermod.py --email user@example.com --name "New Name"
|
||||
|
||||
# Promote to administrator
|
||||
python scripts/usermod.py --email user@example.com --admin
|
||||
|
||||
# Demote from administrator
|
||||
python scripts/usermod.py --email user@example.com --no-admin
|
||||
|
||||
# Deactivate account
|
||||
python scripts/usermod.py --email user@example.com --deactivate
|
||||
|
||||
# Reactivate account
|
||||
python scripts/usermod.py --email user@example.com --activate
|
||||
|
||||
# Change password
|
||||
python scripts/usermod.py --email user@example.com --password NewPassword123
|
||||
```
|
||||
|
||||
**userdel.py** - Delete a user:
|
||||
|
||||
```bash
|
||||
# Delete a user (prompts for confirmation)
|
||||
python scripts/userdel.py --email user@example.com
|
||||
|
||||
# Delete without confirmation
|
||||
python scripts/userdel.py --email user@example.com --force
|
||||
```
|
||||
|
||||
!!! tip "Script Usage"
|
||||
Run any script with `--help` to see all available options:
|
||||
```bash
|
||||
python scripts/useradd.py --help
|
||||
```
|
||||
|
||||
!!! warning "Command Line Management"
|
||||
- These scripts directly modify the database
|
||||
- Always backup your database before making changes
|
||||
- Changes take effect immediately (users may need to log in again)
|
||||
- Deleting a user permanently removes all their content
|
||||
|
||||
### Editing Users
|
||||
|
||||
**Via Command Line:**
|
||||
|
||||
Use `usermod.py` as described above to modify user properties.
|
||||
|
||||
!!! warning "Last Administrator"
|
||||
You cannot remove admin privileges from the last remaining administrator account.
|
||||
|
||||
### Resetting User Passwords
|
||||
|
||||
**Via Web Interface (Coming Soon):**
|
||||
|
||||
Web-based password reset functionality for administrators is coming in a future release.
|
||||
|
||||
**Via Command Line:**
|
||||
|
||||
```bash
|
||||
# Reset a user's password
|
||||
python scripts/usermod.py --email user@example.com --password NewTempPassword123
|
||||
```
|
||||
|
||||
**Security Note:** Never send passwords via email or unsecured channels. Use secure communication methods.
|
||||
|
||||
### Deactivating Users
|
||||
|
||||
**Via Command Line:**
|
||||
|
||||
```bash
|
||||
# Deactivate a user account
|
||||
python scripts/usermod.py --email user@example.com --deactivate
|
||||
|
||||
# Reactivate a user account
|
||||
python scripts/usermod.py --email user@example.com --activate
|
||||
```
|
||||
|
||||
**Effects:**
|
||||
|
||||
- User cannot log in when deactivated
|
||||
- Existing sessions are immediately invalidated
|
||||
- User's data is preserved
|
||||
- Can be reactivated at any time
|
||||
|
||||
### Deleting Users
|
||||
|
||||
**Via Command Line:**
|
||||
|
||||
```bash
|
||||
# Delete a user (prompts for confirmation)
|
||||
python scripts/userdel.py --email user@example.com
|
||||
|
||||
# Delete without confirmation prompt
|
||||
python scripts/userdel.py --email user@example.com --force
|
||||
```
|
||||
|
||||
**Important:**
|
||||
|
||||
- ⚠️ This action is **permanent**
|
||||
- User's boards, images, and workflows are deleted
|
||||
- Cannot be undone
|
||||
- Consider deactivating instead of deleting
|
||||
|
||||
!!! warning "Data Loss"
|
||||
Deleting a user permanently removes all their content. Back up the database first if recovery might be needed.
|
||||
|
||||
### Viewing User Activity
|
||||
|
||||
**Queue Management:**
|
||||
|
||||
1. Navigate to **Admin** → **Queue Overview**
|
||||
2. View all users' active and pending generations
|
||||
3. Filter by user
|
||||
4. Cancel stuck or problematic tasks
|
||||
|
||||
**User Statistics:**
|
||||
|
||||
- Number of boards created
|
||||
- Number of images generated
|
||||
- Storage usage (if enabled)
|
||||
- Last login time
|
||||
|
||||
## Model Management
|
||||
|
||||
As an administrator, you have full access to model management.
|
||||
|
||||
### Adding Models
|
||||
|
||||
**Via Model Manager UI:**
|
||||
|
||||
1. Go to **Models** tab
|
||||
2. Click **Add Model**
|
||||
3. Choose installation method:
|
||||
- **From URL**: Provide HuggingFace repo or download URL
|
||||
- **From Local Path**: Scan local directories
|
||||
- **Import**: Import model from filesystem
|
||||
|
||||
**Supported Model Types:**
|
||||
|
||||
- Main models (Stable Diffusion, SDXL, FLUX)
|
||||
- LoRA models
|
||||
- ControlNet models
|
||||
- VAE models
|
||||
- Textual Inversions
|
||||
- IP-Adapters
|
||||
|
||||
### Configuring Models
|
||||
|
||||
**Model Settings:**
|
||||
|
||||
- Display name
|
||||
- Description
|
||||
- Default generation settings (CFG, steps, scheduler)
|
||||
- Variant selection (fp16/fp32)
|
||||
- Model thumbnail image
|
||||
|
||||
**Default Settings:**
|
||||
|
||||
Set default parameters that users will start with:
|
||||
|
||||
1. Select a model
|
||||
2. Go to **Default Settings** tab
|
||||
3. Configure:
|
||||
- CFG Scale
|
||||
- Steps
|
||||
- Scheduler
|
||||
- VAE selection
|
||||
4. Save settings
|
||||
|
||||
### Removing Models
|
||||
|
||||
1. Go to **Models** tab
|
||||
2. Select model(s) to remove
|
||||
3. Click **Delete**
|
||||
4. Confirm deletion
|
||||
|
||||
!!! warning "Impact"
|
||||
Removing a model affects all users who may be using it in workflows or saved settings.
|
||||
|
||||
## Shared Boards
|
||||
|
||||
Shared boards enable collaboration between users while maintaining control.
|
||||
|
||||
!!! note "Future Feature"
|
||||
Board sharing will be implemented in a future release.
|
||||
|
||||
### Creating Shared Boards
|
||||
|
||||
1. Log in as administrator
|
||||
2. Create a new board (or use existing board)
|
||||
3. Right-click the board → **Share Board**
|
||||
4. Add users and set permissions
|
||||
5. Click **Save Sharing Settings**
|
||||
|
||||
### Permission Levels
|
||||
|
||||
| Level | View | Add Images | Edit/Delete | Manage Sharing |
|
||||
|-------|------|------------|-------------|----------------|
|
||||
| **Read** | ✅ | ❌ | ❌ | ❌ |
|
||||
| **Write** | ✅ | ✅ | ✅ | ❌ |
|
||||
| **Admin** | ✅ | ✅ | ✅ | ✅ |
|
||||
|
||||
**Permission Recommendations:**
|
||||
|
||||
- **Read**: For viewers who should see but not modify content
|
||||
- **Write**: For active collaborators who add and organize images
|
||||
- **Admin**: For trusted users who help manage the shared board
|
||||
|
||||
### Managing Shared Boards
|
||||
|
||||
**Add Users to Shared Board:**
|
||||
|
||||
1. Right-click shared board → **Manage Sharing**
|
||||
2. Click **Add User**
|
||||
3. Select user from dropdown
|
||||
4. Choose permission level
|
||||
5. Save changes
|
||||
|
||||
**Remove Users from Shared Board:**
|
||||
|
||||
1. Right-click shared board → **Manage Sharing**
|
||||
2. Find user in list
|
||||
3. Click **Remove**
|
||||
4. Confirm removal
|
||||
|
||||
**Change User Permissions:**
|
||||
|
||||
1. Right-click shared board → **Manage Sharing**
|
||||
2. Find user in list
|
||||
3. Change permission dropdown
|
||||
4. Save changes
|
||||
|
||||
### Shared Board Best Practices
|
||||
|
||||
- Give meaningful names to shared boards
|
||||
- Document the board's purpose in the description
|
||||
- Assign minimum necessary permissions
|
||||
- Regularly audit access lists
|
||||
- Remove users who no longer need access
|
||||
|
||||
## Security
|
||||
|
||||
### Password Policies
|
||||
|
||||
**Enforced Requirements:**
|
||||
|
||||
- Minimum 8 characters
|
||||
- Must contain uppercase letters
|
||||
- Must contain lowercase letters
|
||||
- Must contain numbers
|
||||
|
||||
**Recommended Policies:**
|
||||
|
||||
- Require 12+ character passwords
|
||||
- Include special characters
|
||||
- Implement password rotation every 90 days
|
||||
- Prevent password reuse
|
||||
- Use multi-factor authentication (when available)
|
||||
|
||||
### Session Management
|
||||
|
||||
**Session Security and Token Management:**
|
||||
|
||||
This system uses stateless JWT tokens with HMAC signatures to
|
||||
identify users after they provide their initial credentials. The
|
||||
tokens will persist for 24 hours by default, or for 7 days if the user
|
||||
clicks the "Remember me" checkbox at login. Expired tokens are
|
||||
automatically rejected and the user will have to log in again.
|
||||
|
||||
At the client side, tokens are stored in browser localStorage. Logging
|
||||
out clears them. No server-side session storage is required.
|
||||
|
||||
The tokens include the user's ID, email, and admin status, along with
|
||||
an HMAC signature.
|
||||
|
||||
### Secret Key Management
|
||||
|
||||
**Important:** The JWT secret key must be kept confidential.
|
||||
|
||||
To generate tokens, each InvokeAI instance has a distinct secret JWT key that must be
|
||||
kept confidential. The key is stored in the `app_settings` table of
|
||||
the InvokeAI database, in a field named `jwt_secret`.
|
||||
|
||||
The secret key is automatically generated during database creation or
|
||||
migration. If you wish to change the key, you may generate a
|
||||
replacement using either of these commands:
|
||||
|
||||
|
||||
```bash
|
||||
# Python
|
||||
python -c "import secrets; print(secrets.token_urlsafe(32))"
|
||||
|
||||
# OpenSSL
|
||||
openssl rand -base64 32
|
||||
```
|
||||
|
||||
Then cut and paste the printed secret into this Sqlite3 command:
|
||||
|
||||
```bash
|
||||
sqlite3 INVOKE_ROOT/databases/invokeai.db 'update app_settings set value="THE_SECRET" where key="jwt_secret"'
|
||||
```
|
||||
|
||||
(replace INVOKE_ROOT with your InvokeAI root directory and THE_SECRET
|
||||
with the new secret).
|
||||
|
||||
After this, restart the server. All logged in users will be logged out
|
||||
and will need to provide their usernames and passwords again.
|
||||
|
||||
### Hosting a Shared InvokeAI Instance
|
||||
|
||||
The multiuser feature allows you to run an InvokeAI backend that can
|
||||
be accessed by your friends and family across your home network. It is
|
||||
also possible to host a backend that is accessible over the Internet.
|
||||
|
||||
By default, InvokeAI runs on `localhost`, IP address `127.0.0.1`,
|
||||
which is only accessible to browsers running on the same machine as
|
||||
the backend. To make the backend accessible to any machine on your
|
||||
home or work LAN, add the line `host: 0.0.0.0` to the InvokeAI
|
||||
configuration file, usually stored at `INVOKE_ROOT/invokeai.yaml`.
|
||||
|
||||
Here is a minimal example.
|
||||
|
||||
```yaml
|
||||
# Internal metadata - do not edit:
|
||||
schema_version: 4.0.2
|
||||
|
||||
# Put user settings here - see https://invoke-ai.github.io/InvokeAI/configuration/:
|
||||
multiuser: true
|
||||
host: 0.0.0.0
|
||||
```
|
||||
|
||||
After relaunching the backend you will be able to reach the server
|
||||
from other machines on the LAN using the server machine's IP address
|
||||
or hostname and port 9090.
|
||||
|
||||
#### Connecting to the Internet
|
||||
|
||||
!!! warning "Use at your own risk"
|
||||
The InvokeAI team has done its best to make the software free of
|
||||
exploitable bugs, but the software has not undergone a rigorous security
|
||||
    audit or intrusion testing. Use at your own risk.
|
||||
|
||||
It is also possible to create a (semi) public server accessible from
|
||||
the Internet. The details of how to do this depend very much on your
|
||||
home or corporate router/firewall system and are beyond the scope of
|
||||
this document.
|
||||
|
||||
If you expose InvokeAI to the Internet, there are a number of
|
||||
precautions to take. Here is a brief list of recommended network
|
||||
security practices.
|
||||
|
||||
**HTTPS Configuration:**
|
||||
|
||||
For internet deployments, always use HTTPS:
|
||||
|
||||
```yaml
|
||||
# Use a reverse proxy like nginx or Traefik
|
||||
# Example nginx configuration:
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name invoke.example.com;
|
||||
|
||||
ssl_certificate /path/to/cert.pem;
|
||||
ssl_certificate_key /path/to/key.pem;
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:9090;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# WebSocket support
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Firewall Rules:**
|
||||
|
||||
It is best to restrict access to trusted networks and remote IP
|
||||
addresses, or use a VPN to connect to your home network. Rate limit
|
||||
connections to InvokeAI's authentication endpoint
|
||||
`http://your.host:9090/login`.
|
||||
|
||||
**Backup and Recovery:**
|
||||
|
||||
It is a good idea to periodically backup your InvokeAI database,
|
||||
images, and possibly models in the event of unauthorized use of a
|
||||
publicly-accessible server.
|
||||
|
||||
**Manual Backup:**
|
||||
|
||||
```bash
|
||||
# Stop InvokeAI
|
||||
# Copy database file
|
||||
cd INVOKE_ROOT
|
||||
cp databases/invokeai.db databases/invokeai.db.$(date +%Y%m%d)
|
||||
|
||||
# Or create compressed backup
|
||||
tar -czf invokeai_backup_$(date +%Y%m%d).tar.gz databases/
|
||||
```
|
||||
|
||||
**Automated Backup Script:**
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# backup_invokeai.sh
|
||||
|
||||
INVOKE_ROOT="/path/to/invoke_root"
|
||||
BACKUP_DIR="/path/to/backups"
|
||||
DB_PATH="$INVOKE_ROOT/databases/invokeai.db"
|
||||
DATE=$(date +%Y%m%d_%H%M%S)
|
||||
|
||||
# Create backup directory
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
|
||||
# Copy database
|
||||
cp "$DB_PATH" "$BACKUP_DIR/invokeai_$DATE.db"
|
||||
|
||||
# Keep only last 30 days
|
||||
find "$BACKUP_DIR" -name "invokeai_*.db" -mtime +30 -delete
|
||||
|
||||
echo "Backup completed: invokeai_$DATE.db"
|
||||
```
|
||||
|
||||
**Schedule with cron:**
|
||||
|
||||
```bash
|
||||
# Edit crontab
|
||||
crontab -e
|
||||
|
||||
# Add daily backup at 2 AM
|
||||
0 2 * * * /path/to/backup_invokeai.sh
|
||||
```
|
||||
|
||||
|
||||
|
||||
**Restoring from Backup:**

```bash
|
||||
# Stop InvokeAI
|
||||
# Replace current database with backup
|
||||
cd INVOKE_ROOT
|
||||
cp databases/invokeai.db databases/invokeai.db.old # Save current
|
||||
cp databases/invokeai_backup.db databases/invokeai.db
|
||||
|
||||
# Restart InvokeAI
|
||||
invokeai-web
|
||||
```
|
||||
|
||||
**Disaster Recovery - Complete System Backup:**
|
||||
|
||||
Include these directories/files:
|
||||
|
||||
- `databases/` - All database files
|
||||
- `models/` - Installed models (if locally stored)
|
||||
- `outputs/` - Generated images
|
||||
- `invokeai.yaml` - Configuration file
|
||||
- Any custom scripts or modifications
|
||||
|
||||
**Recovery Process:**
|
||||
|
||||
1. Install InvokeAI on new system
|
||||
2. Restore configuration file
|
||||
3. Restore database directory
|
||||
4. Restore models and outputs
|
||||
5. Verify file permissions
|
||||
6. Start InvokeAI and test
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### User Cannot Login
|
||||
|
||||
**Symptom:** User reports unable to log in
|
||||
|
||||
**Diagnosis:**
|
||||
|
||||
1. Verify account exists and is active
|
||||
```bash
|
||||
sqlite3 databases/invokeai.db "SELECT * FROM users WHERE email = 'user@example.com';"
|
||||
```
|
||||
|
||||
2. Check password (have user try resetting)
|
||||
3. Verify account is active (`is_active = 1`)
|
||||
4. Check for account lockout (if implemented)
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Reset user password
|
||||
- Reactivate disabled account
|
||||
- Verify email address is correct
|
||||
- Check system logs for auth errors
|
||||
|
||||
### Database Locked Errors
|
||||
|
||||
**Symptom:** "Database is locked" errors
|
||||
|
||||
**Causes:**
|
||||
|
||||
- Concurrent write operations
|
||||
- Long-running transactions
|
||||
- Backup process accessing database
|
||||
- File system issues
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check for locks
|
||||
fuser databases/invokeai.db
|
||||
|
||||
# Increase timeout (in config)
|
||||
# Or switch to WAL mode:
|
||||
sqlite3 databases/invokeai.db "PRAGMA journal_mode=WAL;"
|
||||
```
|
||||
|
||||
### Forgotten Admin Password
|
||||
|
||||
**Recovery Process:**
|
||||
|
||||
1. Stop InvokeAI
|
||||
2. Direct database access:
|
||||
```bash
|
||||
sqlite3 databases/invokeai.db
|
||||
```
|
||||
|
||||
3. Reset admin password (requires password hash):
|
||||
```sql
|
||||
-- Generate hash first using Python:
|
||||
-- from passlib.context import CryptContext
|
||||
-- pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
|
||||
-- print(pwd_context.hash("NewPassword123"))
|
||||
|
||||
UPDATE users
|
||||
SET password_hash = '$2b$12$...'
|
||||
WHERE email = 'admin@example.com';
|
||||
```
|
||||
|
||||
4. Restart InvokeAI
|
||||
|
||||
**Alternative:** Remove `jwt_secret_key` from config to trigger setup wizard (will create new admin).
|
||||
|
||||
### Performance Issues
|
||||
|
||||
**Symptom:** Slow generation or UI
|
||||
|
||||
**Diagnosis:**
|
||||
|
||||
1. Check active generation count
|
||||
2. Review resource usage (CPU/GPU/RAM)
|
||||
3. Check database size and performance
|
||||
4. Review network latency
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Limit concurrent generations
|
||||
- Increase hardware resources
|
||||
- Optimize database (`VACUUM`, `ANALYZE`)
|
||||
- Add indexes for slow queries
|
||||
- Consider load balancing
|
||||
|
||||
### Migration Failures
|
||||
|
||||
**Symptom:** Database migration fails on upgrade
|
||||
|
||||
**Prevention:**
|
||||
|
||||
- Always backup before upgrading
|
||||
- Test migration on copy of database
|
||||
- Review migration logs
|
||||
|
||||
**Recovery:**
|
||||
|
||||
```bash
|
||||
# Restore backup
|
||||
cp databases/invokeai.db.backup databases/invokeai.db
|
||||
|
||||
# Try migration again with verbose logging
|
||||
invokeai-web --log-level DEBUG
|
||||
```
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
### Complete Configuration Example for a Public Site
|
||||
|
||||
```yaml
|
||||
# invokeai.yaml - Multi-user configuration
|
||||
|
||||
# Internal metadata - do not edit:
|
||||
schema_version: 4.0.2
|
||||
|
||||
# Put user settings here
|
||||
multiuser: true
|
||||
|
||||
# Server
|
||||
host: "0.0.0.0"
|
||||
port: 9090
|
||||
|
||||
# Performance
|
||||
enable_partial_loading: true
|
||||
precision: float16
|
||||
pytorch_cuda_alloc_conf: "backend:cudaMallocAsync"
|
||||
hashing_algorithm: blake3_multi
|
||||
```
|
||||
## Frequently Asked Questions
|
||||
|
||||
### How many users can InvokeAI support?
|
||||
|
||||
The backend will support dozens of concurrent users. However, because
|
||||
the image generation queue is single-threaded, image generation tasks
|
||||
are processed on a first-come, first-served basis. This means that a
|
||||
user may have to wait for all the other users' image generation jobs
|
||||
to complete before their generation job starts to execute.
|
||||
|
||||
A future version of InvokeAI may support concurrent execution on
|
||||
systems with multiple GPUs/graphics cards.
|
||||
|
||||
### Can I integrate with existing authentication systems?
|
||||
|
||||
OAuth2/OpenID Connect support is planned for a future release. Currently, InvokeAI uses its own authentication system.
|
||||
|
||||
### How do I audit user actions?
|
||||
|
||||
Full audit logging is planned for a future release. Currently, you can:
|
||||
|
||||
- Monitor the generation queue
|
||||
- Review database changes
|
||||
- Check application logs
|
||||
|
||||
### Can users have different model access?
|
||||
|
||||
Not in the current release. All users can view and use all installed models. Per-user model access is a possible enhancement.
|
||||
|
||||
### How do I handle user data when they leave?
|
||||
|
||||
Best practice:
|
||||
|
||||
1. Deactivate the account first
|
||||
2. Transfer ownership of shared boards
|
||||
3. After transition period, delete the account
|
||||
4. Or keep the account deactivated for audit purposes
|
||||
|
||||
### What's the licensing impact of multi-user mode?
|
||||
|
||||
InvokeAI remains under its existing license. Multi-user mode does not change licensing terms.
|
||||
|
||||
## Getting Help
|
||||
|
||||
### Support Resources
|
||||
|
||||
- **Documentation**: [InvokeAI Docs](https://invoke-ai.github.io/InvokeAI/)
|
||||
- **Discord**: [Join Community](https://discord.gg/ZmtBAhwWhy)
|
||||
- **GitHub Issues**: [Report Problems](https://github.com/invoke-ai/InvokeAI/issues)
|
||||
- **User Guide**: [For Users](user_guide.md)
|
||||
- **API Guide**: [For Developers](api_guide.md)
|
||||
|
||||
### Reporting Issues
|
||||
|
||||
When reporting administrator issues, include:
|
||||
|
||||
- InvokeAI version
|
||||
- Operating system and version
|
||||
- Database size and user count
|
||||
- Relevant log excerpts
|
||||
- Steps to reproduce
|
||||
- Expected vs actual behavior
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [User Guide](user_guide.md) - For end users
|
||||
- [API Guide](api_guide.md) - For API consumers
|
||||
- [Multiuser Specification](specification.md) - Technical details
|
||||
|
||||
---
|
||||
|
||||
**Need additional assistance?** Visit the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy) or file an issue on [GitHub](https://github.com/invoke-ai/InvokeAI/issues).
|
||||
@@ -1,870 +0,0 @@
|
||||
# InvokeAI Multi-User Support - Detailed Specification
|
||||
|
||||
## 1. Executive Summary
|
||||
|
||||
This document provides a comprehensive specification for adding multi-user support to InvokeAI. The feature will enable a single InvokeAI instance to support multiple isolated users, each with their own generation settings, image boards, and workflows, while maintaining administrative controls for model management and system configuration.
|
||||
|
||||
## 2. Overview
|
||||
|
||||
### 2.1 Goals
|
||||
- Enable multiple users to share a single InvokeAI instance
|
||||
- Provide user isolation for personal content (boards, images, workflows, settings)
|
||||
- Maintain centralized model management by administrators
|
||||
- Support shared boards for collaboration
|
||||
- Provide secure authentication and authorization
|
||||
- Minimize impact on existing single-user installations
|
||||
|
||||
### 2.2 Non-Goals
|
||||
- Real-time collaboration features (multiple users editing same workflow simultaneously)
|
||||
- Advanced team management features (in initial release)
|
||||
- Migration of existing multi-user enterprise edition data
|
||||
- Support for external identity providers (in initial release, can be added later)
|
||||
|
||||
## 3. User Roles and Permissions
|
||||
|
||||
### 3.1 Administrator Role
|
||||
**Capabilities:**
|
||||
|
||||
- Full access to all InvokeAI features
|
||||
- Model management (add, delete, configure models)
|
||||
- User management (create, edit, delete users)
|
||||
- View and manage all users' queue sessions
|
||||
- Access system configuration
|
||||
- Create and manage shared boards
|
||||
- Grant/revoke administrative privileges to other users
|
||||
|
||||
**Restrictions:**
|
||||
|
||||
- Cannot delete their own account if they are the last administrator
|
||||
- Cannot revoke their own admin privileges if they are the last administrator
|
||||
|
||||
### 3.2 Regular User Role
|
||||
**Capabilities:**
|
||||
|
||||
- Create, edit, and delete their own image boards
|
||||
- Upload and manage their own assets
|
||||
- Use all image generation tools (linear, canvas, upscale, workflow tabs)
|
||||
- Create, edit, save, and load workflows
|
||||
- Access public/shared workflows
|
||||
- View and manage their own queue sessions
|
||||
- Adjust personal UI preferences (theme, hotkeys, etc.)
|
||||
- Access shared boards (read/write based on permissions)
|
||||
- **View model configurations** (read-only access to model manager)
|
||||
- **View model details, default settings, and metadata**
|
||||
|
||||
**Restrictions:**
|
||||
|
||||
- Cannot add, delete, or edit models
|
||||
- **Can view but cannot modify model manager settings** (read-only access)
|
||||
- Cannot reidentify, convert, or update model paths
|
||||
- Cannot upload or change model thumbnail images
|
||||
- Cannot save changes to model default settings
|
||||
- Cannot perform bulk delete operations on models
|
||||
- Cannot view or modify other users' boards, images, or workflows
|
||||
- Cannot cancel or modify other users' queue sessions
|
||||
- Cannot access system configuration
|
||||
- Cannot manage users or permissions
|
||||
|
||||
### 3.3 Future Role Considerations
|
||||
- **Viewer Role**: Read-only access (future enhancement)
|
||||
- **Team/Group-based Permissions**: Organizational hierarchy (future enhancement)
|
||||
|
||||
## 4. Authentication System
|
||||
|
||||
### 4.1 Authentication Method
|
||||
- **Primary Method**: Username and password authentication with secure password hashing
|
||||
- **Password Hashing**: Use bcrypt or Argon2 for password storage
|
||||
- **Session Management**: JWT tokens or secure session cookies
|
||||
- **Token Expiration**: Configurable session timeout (default: 7 days for "remember me", 24 hours otherwise)
|
||||
|
||||
### 4.2 Initial Administrator Setup
|
||||
**First-time Launch Flow:**
|
||||
|
||||
1. Application detects no administrator account exists
|
||||
2. Displays mandatory setup dialog (cannot be skipped)
|
||||
3. Prompts for:
|
||||
- Administrator username (email format recommended)
|
||||
- Administrator display name
|
||||
- Strong password (minimum requirements enforced)
|
||||
- Password confirmation
|
||||
4. Stores hashed credentials in configuration
|
||||
5. Creates administrator account in database
|
||||
6. Proceeds to normal login screen
|
||||
|
||||
**Reset Capability:**
|
||||
|
||||
- Administrators can be reset by manually editing the config file
|
||||
- Requires access to server filesystem (intentional security measure)
|
||||
- Database maintains user records; config file contains root admin credentials
|
||||
|
||||
### 4.3 Password Requirements
|
||||
- Minimum 8 characters
|
||||
- At least one uppercase letter
|
||||
- At least one lowercase letter
|
||||
- At least one number
|
||||
- At least one special character (optional but recommended)
|
||||
- Not in common password list
|
||||
|
||||
### 4.4 Login Flow
|
||||
|
||||
1. User navigates to InvokeAI URL
|
||||
2. If not authenticated, redirect to login page
|
||||
3. User enters username/email and password
|
||||
4. Optional "Remember me" checkbox for extended session
|
||||
5. Backend validates credentials
|
||||
6. On success: Generate session token, redirect to application
|
||||
7. On failure: Display error, allow retry with rate limiting (prevent brute force)
|
||||
|
||||
### 4.5 Logout Flow
|
||||
- User clicks logout button
|
||||
- Frontend clears session token
|
||||
- Backend invalidates session (if using server-side sessions)
|
||||
- Redirect to login page
|
||||
|
||||
### 4.6 Future Authentication Enhancements
|
||||
- OAuth2/OpenID Connect support
|
||||
- Two-factor authentication (2FA)
|
||||
- SSO integration
|
||||
- API key authentication for programmatic access
|
||||
|
||||
## 5. User Management
|
||||
|
||||
### 5.1 User Creation (Administrator)
|
||||
**Flow:**
|
||||
|
||||
1. Administrator navigates to user management interface
|
||||
2. Clicks "Add User" button
|
||||
3. Enters user information:
|
||||
- Email address (required, used as username)
|
||||
- Display name (optional, defaults to email)
|
||||
- Role (User or Administrator)
|
||||
- Initial password or "Send invitation email"
|
||||
4. System validates email uniqueness
|
||||
5. System creates user account
|
||||
6. If invitation mode:
|
||||
- Generate one-time secure token
|
||||
- Send email with setup link
|
||||
- Link expires after 7 days
|
||||
7. If direct password mode:
|
||||
- Administrator provides initial password
|
||||
- User must change on first login
|
||||
|
||||
**Invitation Email Flow:**
|
||||
|
||||
1. User receives email with unique link
|
||||
2. Link contains secure token
|
||||
3. User clicks link, redirected to setup page
|
||||
4. User enters desired password
|
||||
5. Token validated and consumed (single-use)
|
||||
6. Account activated
|
||||
7. User redirected to login page
|
||||
|
||||
### 5.2 User Profile Management
|
||||
**User Self-Service:**
|
||||
|
||||
- Update display name
|
||||
- Change password (requires current password)
|
||||
- Update email address (requires verification)
|
||||
- Manage UI preferences
|
||||
- View account creation date and last login
|
||||
|
||||
**Administrator Actions:**
|
||||
|
||||
- Edit user information (name, email)
|
||||
- Reset user password (generates reset link)
|
||||
- Toggle administrator privileges
|
||||
- Assign to groups (future feature)
|
||||
- Suspend/unsuspend account
|
||||
- Delete account (with data retention options)
|
||||
|
||||
### 5.3 Password Reset Flow
|
||||
**User-Initiated (Future Enhancement):**
|
||||
|
||||
1. User clicks "Forgot Password" on login page
|
||||
2. Enters email address
|
||||
3. System sends password reset link (if email exists)
|
||||
4. User clicks link, enters new password
|
||||
5. Password updated, user can login
|
||||
|
||||
**Administrator-Initiated:**
|
||||
|
||||
1. Administrator selects user
|
||||
2. Clicks "Send Password Reset"
|
||||
3. System generates reset token and link
|
||||
4. Email sent to user
|
||||
5. User follows same flow as user-initiated reset
|
||||
|
||||
## 6. Data Model and Database Schema
|
||||
|
||||
### 6.1 New Tables
|
||||
|
||||
#### 6.1.1 users
|
||||
```sql
|
||||
CREATE TABLE users (
|
||||
user_id TEXT NOT NULL PRIMARY KEY,
|
||||
email TEXT NOT NULL UNIQUE,
|
||||
display_name TEXT,
|
||||
password_hash TEXT NOT NULL,
|
||||
is_admin BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
is_active BOOLEAN NOT NULL DEFAULT TRUE,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
last_login_at DATETIME
|
||||
);
|
||||
CREATE INDEX idx_users_email ON users(email);
|
||||
CREATE INDEX idx_users_is_admin ON users(is_admin);
|
||||
CREATE INDEX idx_users_is_active ON users(is_active);
|
||||
```
|
||||
|
||||
#### 6.1.2 user_sessions
|
||||
```sql
|
||||
CREATE TABLE user_sessions (
|
||||
session_id TEXT NOT NULL PRIMARY KEY,
|
||||
user_id TEXT NOT NULL,
|
||||
token_hash TEXT NOT NULL,
|
||||
expires_at DATETIME NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
last_activity_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
user_agent TEXT,
|
||||
ip_address TEXT,
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_user_sessions_user_id ON user_sessions(user_id);
|
||||
CREATE INDEX idx_user_sessions_expires_at ON user_sessions(expires_at);
|
||||
CREATE INDEX idx_user_sessions_token_hash ON user_sessions(token_hash);
|
||||
```
|
||||
|
||||
#### 6.1.3 user_invitations
|
||||
```sql
|
||||
CREATE TABLE user_invitations (
|
||||
invitation_id TEXT NOT NULL PRIMARY KEY,
|
||||
email TEXT NOT NULL,
|
||||
token_hash TEXT NOT NULL,
|
||||
invited_by_user_id TEXT NOT NULL,
|
||||
expires_at DATETIME NOT NULL,
|
||||
used_at DATETIME,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
FOREIGN KEY (invited_by_user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_user_invitations_email ON user_invitations(email);
|
||||
CREATE INDEX idx_user_invitations_token_hash ON user_invitations(token_hash);
|
||||
CREATE INDEX idx_user_invitations_expires_at ON user_invitations(expires_at);
|
||||
```
|
||||
|
||||
#### 6.1.4 shared_boards
|
||||
```sql
|
||||
CREATE TABLE shared_boards (
|
||||
board_id TEXT NOT NULL,
|
||||
user_id TEXT NOT NULL,
|
||||
permission TEXT NOT NULL CHECK(permission IN ('read', 'write', 'admin')),
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
PRIMARY KEY (board_id, user_id),
|
||||
FOREIGN KEY (board_id) REFERENCES boards(board_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX idx_shared_boards_user_id ON shared_boards(user_id);
|
||||
CREATE INDEX idx_shared_boards_board_id ON shared_boards(board_id);
|
||||
```
|
||||
|
||||
### 6.2 Modified Tables
|
||||
|
||||
#### 6.2.1 boards
|
||||
```sql
|
||||
-- Add columns:
|
||||
ALTER TABLE boards ADD COLUMN user_id TEXT NOT NULL DEFAULT 'system';
|
||||
ALTER TABLE boards ADD COLUMN is_shared BOOLEAN NOT NULL DEFAULT FALSE;
|
||||
ALTER TABLE boards ADD COLUMN created_by_user_id TEXT;
|
||||
|
||||
-- Add foreign key (requires recreation in SQLite):
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
FOREIGN KEY (created_by_user_id) REFERENCES users(user_id) ON DELETE SET NULL
|
||||
|
||||
-- Add indices:
|
||||
CREATE INDEX idx_boards_user_id ON boards(user_id);
|
||||
CREATE INDEX idx_boards_is_shared ON boards(is_shared);
|
||||
```
|
||||
|
||||
#### 6.2.2 images
|
||||
```sql
|
||||
-- Add column:
|
||||
ALTER TABLE images ADD COLUMN user_id TEXT NOT NULL DEFAULT 'system';
|
||||
|
||||
-- Add foreign key:
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
|
||||
-- Add index:
|
||||
CREATE INDEX idx_images_user_id ON images(user_id);
|
||||
```
|
||||
|
||||
#### 6.2.3 workflows
|
||||
```sql
|
||||
-- Add columns:
|
||||
ALTER TABLE workflows ADD COLUMN user_id TEXT NOT NULL DEFAULT 'system';
|
||||
ALTER TABLE workflows ADD COLUMN is_public BOOLEAN NOT NULL DEFAULT FALSE;
|
||||
|
||||
-- Add foreign key:
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
|
||||
-- Add indices:
|
||||
CREATE INDEX idx_workflows_user_id ON workflows(user_id);
|
||||
CREATE INDEX idx_workflows_is_public ON workflows(is_public);
|
||||
```
|
||||
|
||||
#### 6.2.4 session_queue
|
||||
```sql
|
||||
-- Add column:
|
||||
ALTER TABLE session_queue ADD COLUMN user_id TEXT NOT NULL DEFAULT 'system';
|
||||
|
||||
-- Add foreign key:
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
|
||||
-- Add index:
|
||||
CREATE INDEX idx_session_queue_user_id ON session_queue(user_id);
|
||||
```
|
||||
|
||||
#### 6.2.5 style_presets
|
||||
```sql
|
||||
-- Add columns:
|
||||
ALTER TABLE style_presets ADD COLUMN user_id TEXT NOT NULL DEFAULT 'system';
|
||||
ALTER TABLE style_presets ADD COLUMN is_public BOOLEAN NOT NULL DEFAULT FALSE;
|
||||
|
||||
-- Add foreign key:
|
||||
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
|
||||
|
||||
-- Add indices:
|
||||
CREATE INDEX idx_style_presets_user_id ON style_presets(user_id);
|
||||
CREATE INDEX idx_style_presets_is_public ON style_presets(is_public);
|
||||
```
|
||||
|
||||
### 6.3 Migration Strategy
|
||||
|
||||
1. Create new user tables (users, user_sessions, user_invitations, shared_boards)
|
||||
2. Create default 'system' user for backward compatibility
|
||||
3. Update existing data to reference 'system' user
|
||||
4. Add foreign key constraints
|
||||
5. Version as database migration (e.g., migration_25.py)
|
||||
|
||||
### 6.4 Migration for Existing Installations
|
||||
- Single-user installations: Prompt to create admin account on first launch after update
|
||||
- Existing data migration: Administrator can specify an arbitrary user account to hold legacy data (can be the admin account or a separate user)
|
||||
- System provides UI during migration to choose destination user for existing data
|
||||
|
||||
## 7. API Endpoints
|
||||
|
||||
### 7.1 Authentication Endpoints
|
||||
|
||||
#### POST /api/v1/auth/setup
|
||||
- Initialize first administrator account
|
||||
- Only works if no admin exists
|
||||
- Body: `{ email, display_name, password }`
|
||||
- Response: `{ success, user }`
|
||||
|
||||
#### POST /api/v1/auth/login
|
||||
- Authenticate user
|
||||
- Body: `{ email, password, remember_me? }`
|
||||
- Response: `{ token, user, expires_at }`
|
||||
|
||||
#### POST /api/v1/auth/logout
|
||||
- Invalidate current session
|
||||
- Headers: `Authorization: Bearer <token>`
|
||||
- Response: `{ success }`
|
||||
|
||||
#### GET /api/v1/auth/me
|
||||
- Get current user information
|
||||
- Headers: `Authorization: Bearer <token>`
|
||||
- Response: `{ user }`
|
||||
|
||||
#### POST /api/v1/auth/change-password
|
||||
- Change current user's password
|
||||
- Body: `{ current_password, new_password }`
|
||||
- Headers: `Authorization: Bearer <token>`
|
||||
- Response: `{ success }`
|
||||
|
||||
### 7.2 User Management Endpoints (Admin Only)
|
||||
|
||||
#### GET /api/v1/users
|
||||
- List all users (paginated)
|
||||
- Query params: `offset`, `limit`, `search`, `role_filter`
|
||||
- Response: `{ users[], total, offset, limit }`
|
||||
|
||||
#### POST /api/v1/users
|
||||
- Create new user
|
||||
- Body: `{ email, display_name, is_admin, send_invitation?, initial_password? }`
|
||||
- Response: `{ user, invitation_link? }`
|
||||
|
||||
#### GET /api/v1/users/{user_id}
|
||||
- Get user details
|
||||
- Response: `{ user }`
|
||||
|
||||
#### PATCH /api/v1/users/{user_id}
|
||||
- Update user
|
||||
- Body: `{ display_name?, is_admin?, is_active? }`
|
||||
- Response: `{ user }`
|
||||
|
||||
#### DELETE /api/v1/users/{user_id}
|
||||
- Delete user
|
||||
- Query params: `delete_data` (true/false)
|
||||
- Response: `{ success }`
|
||||
|
||||
#### POST /api/v1/users/{user_id}/reset-password
|
||||
- Send password reset email
|
||||
- Response: `{ success, reset_link }`
|
||||
|
||||
### 7.3 Shared Boards Endpoints
|
||||
|
||||
#### POST /api/v1/boards/{board_id}/share
|
||||
- Share board with users
|
||||
- Body: `{ user_ids[], permission: 'read' | 'write' | 'admin' }`
|
||||
- Response: `{ success, shared_with[] }`
|
||||
|
||||
#### GET /api/v1/boards/{board_id}/shares
|
||||
- Get board sharing information
|
||||
- Response: `{ shares[] }`
|
||||
|
||||
#### DELETE /api/v1/boards/{board_id}/share/{user_id}
|
||||
- Remove board sharing
|
||||
- Response: `{ success }`
|
||||
|
||||
### 7.4 Modified Endpoints
|
||||
|
||||
All existing endpoints will be modified to:
|
||||
|
||||
1. Require authentication (except setup/login)
|
||||
2. Filter data by current user (unless admin viewing all)
|
||||
3. Enforce permissions (e.g., model management requires admin)
|
||||
4. Include user context in operations
|
||||
|
||||
Example modifications:
|
||||
- `GET /api/v1/boards` → Returns only user's boards + shared boards
|
||||
- `POST /api/v1/session/queue` → Associates queue item with current user
|
||||
- `GET /api/v1/queue` → Returns all items for admin, only user's items for regular users
|
||||
|
||||
## 8. Frontend Changes
|
||||
|
||||
### 8.1 New Components
|
||||
|
||||
#### LoginPage
|
||||
- Email/password form
|
||||
- "Remember me" checkbox
|
||||
- Login button
|
||||
- Forgot password link (future)
|
||||
- Branding and welcome message
|
||||
|
||||
#### AdministratorSetup
|
||||
- Modal dialog (cannot be dismissed)
|
||||
- Administrator account creation form
|
||||
- Password strength indicator
|
||||
- Terms/welcome message
|
||||
|
||||
#### UserManagementPage (Admin only)
|
||||
- User list table
|
||||
- Add user button
|
||||
- User actions (edit, delete, reset password)
|
||||
- Search and filter
|
||||
- Role toggle
|
||||
|
||||
#### UserProfilePage
|
||||
- Display user information
|
||||
- Change password form
|
||||
- UI preferences
|
||||
- Account details
|
||||
|
||||
#### BoardSharingDialog
|
||||
- User picker/search
|
||||
- Permission selector
|
||||
- Share button
|
||||
- Current shares list
|
||||
|
||||
### 8.2 Modified Components
|
||||
|
||||
#### App Root
|
||||
- Add authentication check
|
||||
- Redirect to login if not authenticated
|
||||
- Handle session expiration
|
||||
- Add global error boundary for auth errors
|
||||
|
||||
#### Navigation/Header
|
||||
- Add user menu with logout
|
||||
- Display current user name
|
||||
- Admin indicator badge
|
||||
|
||||
#### ModelManagerTab
|
||||
- Hide/disable for non-admin users
|
||||
- Show "Admin only" message
|
||||
|
||||
#### QueuePanel
|
||||
- Filter by current user (for non-admin)
|
||||
- Show all with user indicators (for admin)
|
||||
- Disable actions on other users' items (for non-admin)
|
||||
|
||||
#### BoardsPanel
|
||||
- Show personal boards section
|
||||
- Show shared boards section
|
||||
- Add sharing controls to board actions
|
||||
|
||||
### 8.3 State Management
|
||||
|
||||
New Redux slices/zustand stores:
|
||||
- `authSlice`: Current user, authentication status, token
|
||||
- `usersSlice`: User list for admin interface
|
||||
- `sharingSlice`: Board sharing state
|
||||
|
||||
Updated slices:
|
||||
- `boardsSlice`: Include shared boards, ownership info
|
||||
- `queueSlice`: Include user filtering
|
||||
- `workflowsSlice`: Include public/private status
|
||||
|
||||
## 9. Configuration
|
||||
|
||||
### 9.1 New Config Options
|
||||
|
||||
Add to `InvokeAIAppConfig`:
|
||||
|
||||
```python
|
||||
# Authentication
|
||||
auth_enabled: bool = True # Enable/disable multi-user auth
|
||||
session_expiry_hours: int = 24 # Default session expiration
|
||||
session_expiry_hours_remember: int = 168 # "Remember me" expiration (7 days)
|
||||
password_min_length: int = 8 # Minimum password length
|
||||
require_strong_passwords: bool = True # Enforce password complexity
|
||||
|
||||
# Session tracking
|
||||
enable_server_side_sessions: bool = False # Optional server-side session tracking
|
||||
|
||||
# Audit logging
|
||||
audit_log_auth_events: bool = True # Log authentication events
|
||||
audit_log_admin_actions: bool = True # Log administrative actions
|
||||
|
||||
# Email (optional - for invitations and password reset)
|
||||
email_enabled: bool = False
|
||||
smtp_host: str = ""
|
||||
smtp_port: int = 587
|
||||
smtp_username: str = ""
|
||||
smtp_password: str = ""
|
||||
smtp_from_address: str = ""
|
||||
smtp_from_name: str = "InvokeAI"
|
||||
|
||||
# Initial admin (stored as hash)
|
||||
admin_email: Optional[str] = None
|
||||
admin_password_hash: Optional[str] = None
|
||||
```
|
||||
|
||||
### 9.2 Backward Compatibility
|
||||
|
||||
- If `auth_enabled = False`, system runs in legacy single-user mode
|
||||
- All data belongs to implicit "system" user
|
||||
- No authentication required
|
||||
- Smooth upgrade path for existing installations
|
||||
|
||||
## 10. Security Considerations
|
||||
|
||||
### 10.1 Password Security
|
||||
- Never store passwords in plain text
|
||||
- Use bcrypt or Argon2id for password hashing
|
||||
- Implement proper salt generation
|
||||
- Enforce password complexity requirements
|
||||
- Implement rate limiting on login attempts
|
||||
- Consider password breach checking (Have I Been Pwned API)
|
||||
|
||||
### 10.2 Session Security
|
||||
- Use cryptographically secure random tokens
|
||||
- Implement token rotation
|
||||
- Set appropriate cookie flags (HttpOnly, Secure, SameSite)
|
||||
- Implement session timeout and renewal
|
||||
- Invalidate sessions on logout
|
||||
- Clean up expired sessions periodically
|
||||
|
||||
### 10.3 Authorization
|
||||
- Always verify user identity from session token (never trust client)
|
||||
- Check permissions on every API call
|
||||
- Implement principle of least privilege
|
||||
- Validate user ownership of resources before operations
|
||||
- Implement proper error messages (avoid information leakage)
|
||||
|
||||
### 10.4 Data Isolation
|
||||
- Strict separation of user data in database queries
|
||||
- Prevent SQL injection via parameterized queries
|
||||
- Validate all user inputs
|
||||
- Implement proper access control checks
|
||||
- Audit trail for sensitive operations
|
||||
|
||||
### 10.5 API Security
|
||||
- Implement rate limiting on sensitive endpoints
|
||||
- Use HTTPS in production (enforce via config)
|
||||
- Implement CSRF protection
|
||||
- Validate and sanitize all inputs
|
||||
- Implement proper CORS configuration
|
||||
- Add security headers (CSP, X-Frame-Options, etc.)
|
||||
|
||||
### 10.6 Deployment Security
|
||||
- Document secure deployment practices
|
||||
- Recommend reverse proxy configuration (nginx, Apache)
|
||||
- Provide example configurations for HTTPS
|
||||
- Document firewall requirements
|
||||
- Recommend network isolation strategies
|
||||
|
||||
## 11. Email Integration (Optional)
|
||||
|
||||
**Note**: Email/SMTP configuration is optional. Many administrators will not have ready access to an outgoing SMTP server. When email is not configured, the system provides fallback mechanisms by displaying setup links directly in the admin UI.
|
||||
|
||||
### 11.1 Email Templates
|
||||
|
||||
#### User Invitation
|
||||
```
|
||||
Subject: You've been invited to InvokeAI
|
||||
|
||||
Hello,
|
||||
|
||||
You've been invited to join InvokeAI by [Administrator Name].
|
||||
|
||||
Click the link below to set up your account:
|
||||
[Setup Link]
|
||||
|
||||
This link expires in 7 days.
|
||||
|
||||
---
|
||||
InvokeAI
|
||||
```
|
||||
|
||||
#### Password Reset
|
||||
```
|
||||
Subject: Reset your InvokeAI password
|
||||
|
||||
Hello [User Name],
|
||||
|
||||
A password reset was requested for your account.
|
||||
|
||||
Click the link below to reset your password:
|
||||
[Reset Link]
|
||||
|
||||
This link expires in 24 hours.
|
||||
|
||||
If you didn't request this, please ignore this email.
|
||||
|
||||
---
|
||||
InvokeAI
|
||||
```
|
||||
|
||||
### 11.2 Email Service
|
||||
- Support SMTP configuration
|
||||
- Use secure connection (TLS)
|
||||
- Handle email failures gracefully
|
||||
- Implement email queue for reliability
|
||||
- Log email activities (without sensitive data)
|
||||
- Provide fallback for no-email deployments (show links in admin UI)
|
||||
|
||||
## 12. Testing Requirements
|
||||
|
||||
### 12.1 Unit Tests
|
||||
- Authentication service (password hashing, validation)
|
||||
- Authorization checks
|
||||
- Token generation and validation
|
||||
- User management operations
|
||||
- Shared board permissions
|
||||
- Data isolation queries
|
||||
|
||||
### 12.2 Integration Tests
|
||||
- Complete authentication flows
|
||||
- User creation and invitation
|
||||
- Password reset flow
|
||||
- Multi-user data isolation
|
||||
- Shared board access
|
||||
- Session management
|
||||
- Admin operations
|
||||
|
||||
### 12.3 Security Tests
|
||||
- SQL injection prevention
|
||||
- XSS prevention
|
||||
- CSRF protection
|
||||
- Session hijacking prevention
|
||||
- Brute force protection
|
||||
- Authorization bypass attempts
|
||||
|
||||
### 12.4 Performance Tests
|
||||
- Authentication overhead
|
||||
- Query performance with user filters
|
||||
- Concurrent user sessions
|
||||
- Database scalability with many users
|
||||
|
||||
## 13. Documentation Requirements
|
||||
|
||||
### 13.1 User Documentation
|
||||
- Getting started with multi-user InvokeAI
|
||||
- Login and account management
|
||||
- Using shared boards
|
||||
- Understanding permissions
|
||||
- Troubleshooting authentication issues
|
||||
|
||||
### 13.2 Administrator Documentation
|
||||
- Setting up multi-user InvokeAI
|
||||
- User management guide
|
||||
- Creating and managing shared boards
|
||||
- Email configuration
|
||||
- Security best practices
|
||||
- Backup and restore with user data
|
||||
|
||||
### 13.3 Developer Documentation
|
||||
- Authentication architecture
|
||||
- API authentication requirements
|
||||
- Adding new multi-user features
|
||||
- Database schema changes
|
||||
- Testing multi-user features
|
||||
|
||||
### 13.4 Migration Documentation
|
||||
- Upgrading from single-user to multi-user
|
||||
- Data migration strategies
|
||||
- Rollback procedures
|
||||
- Common issues and solutions
|
||||
|
||||
## 14. Future Enhancements
|
||||
|
||||
### 14.1 Phase 2 Features
|
||||
- **OAuth2/OpenID Connect integration** (deferred from initial release to keep scope manageable)
|
||||
- Two-factor authentication
|
||||
- API keys for programmatic access
|
||||
- Enhanced team/group management
|
||||
- Advanced permission system (roles and capabilities)
|
||||
|
||||
### 14.2 Phase 3 Features
|
||||
- SSO integration (SAML, LDAP)
|
||||
- User quotas and limits
|
||||
- Resource usage tracking
|
||||
- Advanced collaboration features
|
||||
- Workflow template library with permissions
|
||||
- Model access controls per user/group
|
||||
|
||||
## 15. Success Metrics
|
||||
|
||||
### 15.1 Functionality Metrics
|
||||
- Successful user authentication rate
|
||||
- Zero unauthorized data access incidents
|
||||
- All tests passing (unit, integration, security)
|
||||
- API response time within acceptable limits
|
||||
|
||||
### 15.2 Usability Metrics
|
||||
- User setup completion time < 2 minutes
|
||||
- Login time < 2 seconds
|
||||
- Clear error messages for all auth failures
|
||||
- Positive user feedback on multi-user features
|
||||
|
||||
### 15.3 Security Metrics
|
||||
- No critical security vulnerabilities identified
|
||||
- CodeQL scan passes
|
||||
- Penetration testing completed
|
||||
- Security best practices followed
|
||||
|
||||
## 16. Risks and Mitigations
|
||||
|
||||
### 16.1 Technical Risks
|
||||
| Risk | Impact | Probability | Mitigation |
|
||||
|------|--------|-------------|------------|
|
||||
| Performance degradation with user filtering | Medium | Low | Index optimization, query caching |
|
||||
| Database migration failures | High | Low | Thorough testing, rollback procedures |
|
||||
| Session management complexity | Medium | Medium | Use proven libraries (PyJWT), extensive testing |
|
||||
| Auth bypass vulnerabilities | High | Low | Security review, penetration testing |
|
||||
|
||||
### 16.2 UX Risks
|
||||
| Risk | Impact | Probability | Mitigation |
|
||||
|------|--------|-------------|------------|
|
||||
| Confusion in migration for existing users | Medium | High | Clear documentation, migration wizard |
|
||||
| Friction from additional login step | Low | High | Remember me option, long session timeout |
|
||||
| Complexity of admin interface | Medium | Medium | Intuitive UI design, user testing |
|
||||
|
||||
### 16.3 Operational Risks
|
||||
| Risk | Impact | Probability | Mitigation |
|
||||
|------|--------|-------------|------------|
|
||||
| Email delivery failures | Low | Medium | Show links in UI, document manual methods |
|
||||
| Lost admin password | High | Low | Document recovery procedure, config reset |
|
||||
| User data conflicts in migration | Medium | Low | Data validation, backup requirements |
|
||||
|
||||
## 17. Implementation Phases
|
||||
|
||||
### Phase 1: Foundation (Weeks 1-2)
|
||||
- Database schema design and migration
|
||||
- Basic authentication service
|
||||
- Password hashing and validation
|
||||
- Session management
|
||||
|
||||
### Phase 2: Backend API (Weeks 3-4)
|
||||
- Authentication endpoints
|
||||
- User management endpoints
|
||||
- Authorization middleware
|
||||
- Update existing endpoints with auth
|
||||
|
||||
### Phase 3: Frontend Auth (Weeks 5-6)
|
||||
- Login page and flow
|
||||
- Administrator setup
|
||||
- Session management
|
||||
- Auth state management
|
||||
|
||||
### Phase 4: Multi-tenancy (Weeks 7-9)
|
||||
- User isolation in all services
|
||||
- Shared boards implementation
|
||||
- Queue permission filtering
|
||||
- Workflow public/private
|
||||
|
||||
### Phase 5: Admin Interface (Weeks 10-11)
|
||||
- User management UI
|
||||
- Board sharing UI
|
||||
- Admin-specific features
|
||||
- User profile page
|
||||
|
||||
### Phase 6: Testing & Polish (Weeks 12-13)
|
||||
- Comprehensive testing
|
||||
- Security audit
|
||||
- Performance optimization
|
||||
- Documentation
|
||||
- Bug fixes
|
||||
|
||||
### Phase 7: Beta & Release (Week 14+)
|
||||
- Beta testing with selected users
|
||||
- Feedback incorporation
|
||||
- Final testing
|
||||
- Release preparation
|
||||
- Documentation finalization
|
||||
|
||||
## 18. Acceptance Criteria
|
||||
|
||||
- [ ] Administrator can set up initial account on first launch
|
||||
- [ ] Users can log in with email and password
|
||||
- [ ] Users can change their password
|
||||
- [ ] Administrators can create, edit, and delete users
|
||||
- [ ] User data is properly isolated (boards, images, workflows)
|
||||
- [ ] Shared boards work correctly with permissions
|
||||
- [ ] Non-admin users cannot access model management
|
||||
- [ ] Queue filtering works correctly for users and admins
|
||||
- [ ] Session management works correctly (expiry, renewal, logout)
|
||||
- [ ] All security tests pass
|
||||
- [ ] API documentation is updated
|
||||
- [ ] User and admin documentation is complete
|
||||
- [ ] Migration from single-user works smoothly
|
||||
- [ ] Performance is acceptable with multiple concurrent users
|
||||
- [ ] Backward compatibility mode works (auth disabled)
|
||||
|
||||
## 19. Design Decisions
|
||||
|
||||
The following design decisions have been approved for implementation:
|
||||
|
||||
1. **OAuth2 Priority**: OAuth2/OpenID Connect integration will be a **future enhancement**. The initial release will focus on username/password authentication to keep scope manageable.
|
||||
|
||||
2. **Email Requirement**: Email/SMTP configuration is **optional**. Many administrators will not have ready access to an outgoing SMTP server. The system will provide fallback mechanisms (showing setup links directly in the admin UI) when email is not configured.
|
||||
|
||||
3. **Data Migration**: During migration from single-user to multi-user mode, the administrator will be given the **option to specify an arbitrary user account** to hold legacy data. The admin account can be used for this purpose if the administrator wishes.
|
||||
|
||||
4. **API Compatibility**: Authentication will be **required on all APIs**, but authentication will not be required if multi-user support is disabled (backward compatibility mode with `auth_enabled: false`).
|
||||
|
||||
5. **Session Storage**: The system will use **JWT tokens with optional server-side session tracking**. This provides scalability while allowing administrators to enable server-side tracking if needed.
|
||||
|
||||
6. **Audit Logging**: The system will **log authentication events and admin actions**. This provides accountability and security monitoring for critical operations.
|
||||
|
||||
## 20. Conclusion
|
||||
|
||||
This specification provides a comprehensive blueprint for implementing multi-user support in InvokeAI. The design prioritizes:
|
||||
|
||||
- **Security**: Proper authentication, authorization, and data isolation
|
||||
- **Usability**: Intuitive UI, smooth migration, minimal friction
|
||||
- **Scalability**: Efficient database design, performant queries
|
||||
- **Maintainability**: Clean architecture, comprehensive testing
|
||||
- **Flexibility**: Future enhancement paths, optional features
|
||||
|
||||
The phased implementation approach allows for iterative development and testing, while the detailed specifications ensure all stakeholders have clear expectations of the final system.
|
||||
@@ -1,406 +0,0 @@
|
||||
# InvokeAI Multi-User Guide
|
||||
|
||||
## Overview
|
||||
|
||||
Multi-User mode is a recent feature (introduced in version 6.12), which allows multiple individuals to share a single InvokeAI server while keeping their work separate and organized. Each user has their own username and login password, images, assets, image boards, customization settings and workflows.
|
||||
|
||||
Two types of users are recognized:
|
||||
|
||||
* A user with **Administrator** status can add, remove and modify other users, and can install models. They also have the ability to view the full session queue and pause or kill other users' jobs.
|
||||
* **Non-administrator** users can modify their own profile but not those of other users. They also do not have the ability to install or configure models, and must ask an Administrator to do this task.
|
||||
|
||||
Multiple users can be granted Administrator status.
|
||||
|
||||
***
|
||||
|
||||
## Getting Started
|
||||
|
||||
To activate Multi-User mode, open the `INVOKEAI_ROOT/invokeai.yaml` configuration file in a text editor. Add this line anywhere in the file:
|
||||
```yaml
|
||||
multiuser: true
|
||||
```
|
||||
|
||||
You may also wish to make InvokeAI available to other machines on your local LAN. Add an additional line to `invokeai.yaml`:
|
||||
|
||||
```yaml
|
||||
host: 0.0.0.0
|
||||
```
|
||||
|
||||
Restart the server. It will now be in multi-user mode. If you enabled
|
||||
the `host` option, other users on your home or office LAN will be able
|
||||
to reach it by browsing to the IP address of the machine the backend
|
||||
is running on (`http://host-ip-address:9090`).
|
||||
|
||||
!!! tip "Do not expose InvokeAI to the internet"
|
||||
It is not recommended to expose the InvokeAI host to the internet
|
||||
due to security concerns.
|
||||
|
||||
### Initial Setup (First Time in Multi-User Mode)
|
||||
|
||||
If you're the first person to access a fresh InvokeAI installation in multi-user mode, you'll see the **Administrator Setup** dialog:
|
||||
|
||||

|
||||
|
||||
To create the administrator account:
|
||||
|
||||
1. Enter your email address (this will be your login name)
|
||||
2. Create a display name (this will be the name other users see)
|
||||
3. Choose a strong password that meets the requirements:
|
||||
- At least 8 characters long
|
||||
- Contains uppercase letters
|
||||
- Contains lowercase letters
|
||||
- Contains numbers
|
||||
4. Confirm your password
|
||||
5. Click **Create Administrator Account**
|
||||
|
||||
You'll now be taken to a login screen and can enter the credentials
|
||||
you just created.
|
||||
|
||||
### Adding and Modifying Users
|
||||
|
||||
If you are logged in as Administrator, you can add additional users. Click on the small "person silhouette" icon at the bottom left of the main Invoke screen and select "User Management:"
|
||||
|
||||

|
||||
|
||||
This will take you to the User Management screen...
|
||||
|
||||

|
||||
|
||||
...where you can click "Create User" to add a new user.
|
||||
|
||||

|
||||
|
||||
The User Management screen also allows you to:
|
||||
|
||||
1. Temporarily change a user's status to Inactive, preventing them from logging in to Invoke.
|
||||
2. Edit a user (by clicking on the pencil icon) to change the user's display name or password.
|
||||
3. Permanently delete a user.
|
||||
4. Grant a user Administrator privileges.
|
||||
|
||||
### Command-line User Management Scripts
|
||||
|
||||
Administrators can also use a series of command-line scripts to add, modify, or delete users. If you use the launcher, click the ">" icon to enter the command-line interface. Otherwise, if you are a native command-line user, activate the InvokeAI environment from your terminal.
|
||||
|
||||
The commands are named:
|
||||
|
||||
* **invoke-useradd** -- add a user
|
||||
* **invoke-usermod** -- modify a user
|
||||
* **invoke-userdel** -- delete a user
|
||||
* **invoke-userlist** -- list all users
|
||||
|
||||
Pass the `--help` argument to get the usage of each script. For example:
|
||||
|
||||
```bash
|
||||
> invoke-useradd --help
|
||||
usage: invoke-useradd [-h] [--root ROOT] [--email EMAIL] [--password PASSWORD] [--name NAME] [--admin]
|
||||
|
||||
Add a user to the InvokeAI database
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--root ROOT, -r ROOT Path to the InvokeAI root directory. If omitted, the root is resolved in this order: the $INVOKEAI_ROOT environment
|
||||
variable, the active virtual environment's parent directory, or $HOME/invokeai.
|
||||
--email EMAIL, -e EMAIL
|
||||
User email address
|
||||
--password PASSWORD, -p PASSWORD
|
||||
User password
|
||||
--name NAME, -n NAME User display name (optional)
|
||||
--admin, -a Make user an administrator
|
||||
|
||||
If no arguments are provided, the script will run in interactive mode.
|
||||
```
|
||||
|
||||
***
|
||||
|
||||
## Logging in as a Non-Administrative User
|
||||
|
||||
If you are a registered user on the system, enter your email address and password to log in. The Administrator will be able to provide you with the values to use:
|
||||
|
||||

|
||||
|
||||
As an unprivileged user you can do pretty much anything that's allowed under single-user mode -- generating images, using LoRAs, creating and running workflows, creating image boards -- but you are restricted against installing new models, changing low-level server settings, or interfering with other users. More information on user roles is given below.
|
||||
|
||||
### Changing your Profile
|
||||
|
||||
To change your display name or profile, click on the person silhouette icon at the bottom left of the screen and choose "My Profile". This will take you to a screen that lets you change these values. At this time you can change your display name but not your login ID (ordinarily your contact email address).
|
||||
|
||||
***
|
||||
|
||||
## Understanding User Roles
|
||||
|
||||
In single-user mode, you have access to all features without restrictions. In multi-user mode, InvokeAI has two user roles:
|
||||
|
||||
### Regular User
|
||||
|
||||
As a regular user, you can:
|
||||
|
||||
- ✅ Create and manage your own image boards
|
||||
- ✅ Generate images using all AI tools (Linear, Canvas, Upscale, Workflows)
|
||||
- ✅ Create, save, and load your own workflows
|
||||
- ✅ View your own generation queue
|
||||
- ✅ Customize your UI preferences (theme, hotkeys, etc.)
|
||||
- ✅ View available models (read-only access to Model Manager)
|
||||
- ✅ View shared and public boards created by other users
|
||||
- ✅ View and use workflows marked as shared by other users
|
||||
|
||||
You cannot:
|
||||
|
||||
- ❌ Add, delete, or modify models
|
||||
- ❌ View or modify other users' private boards, images, or workflows
|
||||
- ❌ Manage user accounts
|
||||
- ❌ Access system configuration
|
||||
- ❌ View or cancel other users' generation tasks
|
||||
|
||||
!!! tip "The generation queue"
|
||||
When two or more users are accessing InvokeAI at the same time,
|
||||
their image generation jobs will be placed on the session queue on
|
||||
a first-come, first-serve basis. This means that you will have to
|
||||
wait for other users' image rendering jobs to complete before
|
||||
yours will start.
|
||||
|
||||
When another user's job is running, you will see the image
|
||||
generation progress bar and a queue badge that reads `X/Y`, where
|
||||
"X" is the number of jobs you have queued and "Y" is the total
|
||||
number of jobs queued, including your own and others.
|
||||
|
||||
You can also pull up the Queue tab in order to see where your job
|
||||
is in relationship to other queued tasks.
|
||||
|
||||
### Administrator
|
||||
|
||||
Administrators have all regular user capabilities, plus:
|
||||
|
||||
- ✅ Full model management (add, delete, configure models)
|
||||
- ✅ Create and manage user accounts
|
||||
- ✅ View and manage all users' generation queues
|
||||
- ✅ View and manage all users' boards, images, and workflows (including system-owned legacy content)
|
||||
- ✅ Access system configuration
|
||||
- ✅ Grant or revoke admin privileges
|
||||
|
||||
***
|
||||
|
||||
## Working with Your Content in Multi-User Mode
|
||||
|
||||
### Image Boards
|
||||
|
||||
In multi-user mode, each user can create an unlimited number of boards and organize their images and assets as they see fit. Boards have three visibility levels:
|
||||
|
||||
- **Private** (default): Only you (and administrators) can see and modify the board.
|
||||
- **Shared**: All users can view the board and its contents, but only you (and administrators) can modify it (rename, archive, delete, or add/remove images).
|
||||
- **Public**: All users can view the board. Only you (and administrators) can modify the board's structure (rename, archive, delete).
|
||||
|
||||
To change a board's visibility, right-click on the board and select the desired visibility option.
|
||||
|
||||
Administrators can see and manage all users' image boards and their contents regardless of visibility settings.
|
||||
|
||||
### Going From Multi-User to Single-User Mode
|
||||
|
||||
If an InvokeAI instance was in multiuser mode and then restarted in single user mode (by setting `multiuser: false` in the configuration file), all users' boards will be consolidated in one place. Any images that were in "Uncategorized" will be merged together into a single Uncategorized board. If, at a later date, the server is restarted in multi-user mode, the boards and images will be separated and restored to their owners.
|
||||
|
||||
### Workflows
|
||||
|
||||
Each user has their own private workflow library. Workflows you create are visible only to you by default.
|
||||
|
||||
You can share a workflow with other users by marking it as **shared** (public). Shared workflows appear in all users' workflow libraries and can be opened by anyone, but only the owner (or an administrator) can modify or delete them.
|
||||
|
||||
To share a workflow, open it and use the sharing controls to toggle its public/shared status.
|
||||
|
||||
!!! warning "Preexisting workflows after enabling multi-user mode"
|
||||
When you enable multi-user mode for the first time on an existing InvokeAI installation, all workflows that were created before multi-user mode was activated will appear in the **shared workflows** section. These preexisting workflows are owned by the internal "system" account and are visible to all users. Administrators can edit or delete these shared legacy workflows. Regular users can view and use them but cannot modify them.
|
||||
|
||||
|
||||
### The Generation Queue
|
||||
|
||||
The queue shows your pending and running generation tasks.
|
||||
|
||||
**Queue Features:**
|
||||
|
||||
- View your current and completed generations
|
||||
- Cancel pending tasks
|
||||
- Re-run previous generations
|
||||
- Monitor progress in real-time
|
||||
|
||||
**Queue Isolation:**
|
||||
|
||||
- You will see your own queue items, as well as the items generated by
|
||||
other users, but the generation parameters (e.g. prompts) for other
|
||||
users' are hidden for privacy reasons.
|
||||
- Administrators can view all queues for troubleshooting
|
||||
- Your generations won't interfere with other users' tasks
|
||||
|
||||
***
|
||||
|
||||
## Customizing Your Experience
|
||||
|
||||
### Personal Preferences
|
||||
|
||||
Your UI preferences are saved to your account and are restored when you log in:
|
||||
|
||||
- **Theme**: Choose between light and dark modes
|
||||
- **Hotkeys**: Customize keyboard shortcuts
|
||||
- **Canvas Settings**: Default zoom, grid visibility, etc.
|
||||
- **Generation Defaults**: Default values for width, height, steps, etc.
|
||||
|
||||
These settings are stored per-user and won't affect other users.
|
||||
|
||||
***
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Cannot Log In
|
||||
|
||||
**Issue:** Login fails with "Incorrect email or password"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Verify you're entering the correct email address
|
||||
- Check that Caps Lock is off
|
||||
- Try typing the password slowly to avoid mistakes
|
||||
- Contact your administrator if you've forgotten your password
|
||||
|
||||
**Issue:** Login fails with "Account is disabled"
|
||||
|
||||
**Solution:** Contact your administrator to reactivate your account
|
||||
|
||||
### Session Expired
|
||||
|
||||
**Issue:** You're suddenly logged out and see "Session expired"
|
||||
|
||||
**Explanation:** Sessions expire after 24 hours (or 7 days with "remember me")
|
||||
|
||||
**Solution:** Simply log in again with your credentials
|
||||
|
||||
### Cannot Access Features
|
||||
|
||||
**Issue:** Features like Model Manager show "Admin privileges required"
|
||||
|
||||
**Explanation:** Some features are restricted to administrators
|
||||
|
||||
**Solution:**
|
||||
|
||||
- For model viewing: You can view but not modify models
|
||||
- For user management: Contact an administrator
|
||||
- For system configuration: Contact an administrator
|
||||
|
||||
### Missing Boards or Images
|
||||
|
||||
**Issue:** Boards or images you created are not visible
|
||||
|
||||
**Possible Causes:**
|
||||
|
||||
1. **Filter Applied:** Check if a filter is hiding content
|
||||
2. **Wrong User:** Ensure you're logged in with the correct account
|
||||
3. **Archived Board:** Check the "Show Archived" option
|
||||
|
||||
**Solution:**
|
||||
|
||||
- Clear any active filters
|
||||
- Verify you're logged in as the right user
|
||||
- Check archived items
|
||||
|
||||
### Slow Performance
|
||||
|
||||
**Issue:** Generation or UI feels slower than expected
|
||||
|
||||
**Possible Causes:**
|
||||
|
||||
- Other users generating images simultaneously
|
||||
- Server resource limits
|
||||
- Network latency
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Check the queue to see if others are generating
|
||||
- Wait for current generations to complete
|
||||
- Contact administrator if persistent
|
||||
|
||||
### Generation Stuck in Queue
|
||||
|
||||
**Issue:** Your generation is queued but not starting
|
||||
|
||||
**Possible Causes:**
|
||||
|
||||
- Server is processing other users' generations
|
||||
- Server resources are fully utilized
|
||||
- Technical issue with the server
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Wait for your turn in the queue
|
||||
- Check if your generation is paused
|
||||
- Contact administrator if stuck for extended period
|
||||
|
||||
|
||||
***
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
### Can other users see my images?
|
||||
|
||||
Not unless you change your board's visibility to "shared" or "public". All personal boards and images are private by default.
|
||||
|
||||
### Can I share my workflows with others?
|
||||
|
||||
Yes. You can mark any workflow as shared (public), which makes it visible to all users. Other users can view and use shared workflows, but only you or an administrator can modify or delete them.
|
||||
|
||||
### How long do sessions last?
|
||||
|
||||
- 24 hours by default
|
||||
- 7 days if you check "Remember me" during login
|
||||
|
||||
### Can I use the API with multi-user mode?
|
||||
|
||||
Yes, but you'll need to authenticate with a JWT token. See the [API Guide](api_guide.md) for details.
|
||||
|
||||
### What happens if I forget my password?
|
||||
|
||||
Contact your administrator. They can reset your password for you.
|
||||
|
||||
### Can I have multiple sessions?
|
||||
|
||||
Yes, you can log in from multiple devices or browsers simultaneously. All sessions will use the same account and see the same content.
|
||||
|
||||
### Why can't I see the Model Manager "Add Models" tab?
|
||||
|
||||
Regular users can see the Models tab but with read-only access. Check that you're logged in and try refreshing the page.
|
||||
|
||||
### How do I know if I'm an administrator?
|
||||
|
||||
Administrators see an "Admin" badge next to their name in the top-right corner and have access to additional features like User Management.
|
||||
|
||||
### Can I request admin privileges?
|
||||
|
||||
Yes, ask your current administrator to grant you admin
|
||||
privileges. Admin privileges will give you the ability to see all
|
||||
other users' boards and images, as well as to add models and change
|
||||
various server-wide settings.
|
||||
|
||||
## Getting Help
|
||||
|
||||
### Support Channels
|
||||
|
||||
- **Administrator:** Contact your system administrator for account issues
|
||||
- **Documentation:** Check the [FAQ](../faq.md) for common issues
|
||||
- **Community:** Join the [Discord](https://discord.gg/ZmtBAhwWhy) for help
|
||||
- **Bug Reports:** File issues on [GitHub](https://github.com/invoke-ai/InvokeAI/issues)
|
||||
|
||||
### Reporting Issues
|
||||
|
||||
When reporting an issue, include:
|
||||
|
||||
- Your role (regular user or administrator)
|
||||
- What you were trying to do
|
||||
- What happened instead
|
||||
- Any error messages you saw
|
||||
- Your browser and operating system
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Administrator Guide](admin_guide.md) - For administrators managing users and the system
|
||||
- [API Guide](api_guide.md) - For developers using the InvokeAI API
|
||||
- [Multiuser Specification](specification.md) - Technical details about the feature
|
||||
- [InvokeAI Documentation](../index.md) - Main documentation hub
|
||||
|
||||
---
|
||||
|
||||
**Need more help?** Contact your administrator or visit the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
21
docs/.gitignore
vendored
@@ -1,21 +0,0 @@
|
||||
# build output
|
||||
dist/
|
||||
# generated types
|
||||
.astro/
|
||||
|
||||
# dependencies
|
||||
node_modules/
|
||||
|
||||
# logs
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
|
||||
# environment variables
|
||||
.env
|
||||
.env.production
|
||||
|
||||
# macOS-specific files
|
||||
.DS_Store
|
||||
@@ -1 +0,0 @@
|
||||
# Invoke AI Documentation
|
||||
173
docs/RELEASE.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Release Process
|
||||
|
||||
The app is published twice, in different build formats.
|
||||
|
||||
- A [PyPI] distribution. This includes both a source distribution and built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build.
|
||||
- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs.
|
||||
|
||||
## General Prep
|
||||
|
||||
Make a developer call-out for PRs to merge. Merge and test things out.
|
||||
|
||||
While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build.
|
||||
|
||||
## Release Workflow
|
||||
|
||||
The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI.
|
||||
|
||||
It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same.
|
||||
|
||||
> Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch. So long as the tag exists, that commit will exist.
|
||||
|
||||
### Triggering the Workflow
|
||||
|
||||
Run `make tag-release` to tag the current commit and kick off the workflow.
|
||||
|
||||
The release may also be dispatched [manually].
|
||||
|
||||
### Workflow Jobs and Process
|
||||
|
||||
The workflow consists of a number of concurrently-run jobs, and two final publish jobs.
|
||||
|
||||
The publish jobs require manual approval and are only run if the other jobs succeed.
|
||||
|
||||
#### `check-version` Job
|
||||
|
||||
This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`.
|
||||
|
||||
When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown.
|
||||
|
||||
This job uses [samuelcolvin/check-python-version].
|
||||
|
||||
> Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc.
|
||||
|
||||
#### Check and Test Jobs
|
||||
|
||||
- **`python-tests`**: runs `pytest` on matrix of platforms
|
||||
- **`python-checks`**: runs `ruff` (format and lint)
|
||||
- **`frontend-tests`**: runs `vitest`
|
||||
- **`frontend-checks`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `tsc` (static type check) and `knip` (unused imports)
|
||||
|
||||
> **TODO** We should add `mypy` or `pyright` to the **`python-checks`** job.
|
||||
|
||||
> **TODO** We should add an end-to-end test job that generates an image.
|
||||
|
||||
#### `build-installer` Job
|
||||
|
||||
This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts:
|
||||
|
||||
- **`dist`**: the python distribution, to be published on PyPI
|
||||
- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release
|
||||
|
||||
#### Sanity Check & Smoke Test
|
||||
|
||||
At this point, the release workflow pauses as the remaining publish jobs require approval. Time to test the installer.
|
||||
|
||||
Because the installer pulls from PyPI, and we haven't published to PyPI yet, you will need to install from the wheel:
|
||||
|
||||
- Download and unzip `dist.zip` and the installer from the **Summary** tab of the workflow
|
||||
- Run the installer script using the `--wheel` CLI arg, pointing at the wheel:
|
||||
|
||||
```sh
|
||||
./install.sh --wheel ../InvokeAI-4.0.0rc6-py3-none-any.whl
|
||||
```
|
||||
|
||||
- Install to a temporary directory so you get the new user experience
|
||||
- Download a model and generate
|
||||
|
||||
> The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with exactly the same installation as if the installer got the wheel from PyPI.
|
||||
|
||||
##### Something isn't right
|
||||
|
||||
If testing reveals any issues, no worries. Cancel the workflow, which will cancel the pending publish jobs (you didn't approve them prematurely, right?).
|
||||
|
||||
Now you can start from the top:
|
||||
|
||||
- Fix the issues and PR the fixes per usual
|
||||
- Get the PR approved and merged per usual
|
||||
- Switch to `main` and pull in the fixes
|
||||
- Run `make tag-release` to move the tag to `HEAD` (which has the fixes) and kick off the release workflow again
|
||||
- Re-do the sanity check
|
||||
|
||||
#### PyPI Publish Jobs
|
||||
|
||||
The publish jobs will not run if any of the previous jobs fail.
|
||||
|
||||
They use [GitHub environments], which are configured as [trusted publishers] on PyPI.
|
||||
|
||||
Both jobs require a maintainer to approve them from the workflow's **Summary** tab.
|
||||
|
||||
- Click the **Review deployments** button
|
||||
- Select the environment (either `testpypi` or `pypi`)
|
||||
- Click **Approve and deploy**
|
||||
|
||||
> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a followup release.
|
||||
|
||||
##### Failing PyPI Publish
|
||||
|
||||
Check the [python infrastructure status page] for incidents.
|
||||
|
||||
If there are no incidents, contact @hipsterusername or @lstein, who have owner access to GH and PyPI, to see if access has expired or something like that.
|
||||
|
||||
#### `publish-testpypi` Job
|
||||
|
||||
Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment.
|
||||
|
||||
This job is not required for the production PyPI publish, but included just in case you want to test the PyPI release.
|
||||
|
||||
If approved and successful, you could try out the test release like this:
|
||||
|
||||
```sh
|
||||
# Create a new virtual environment
|
||||
python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist
|
||||
# Install the distribution from Test PyPI
|
||||
pip install --index-url https://test.pypi.org/simple/ invokeai
|
||||
# Run and test the app
|
||||
invokeai-web
|
||||
# Cleanup
|
||||
deactivate
|
||||
rm -rf ~/.test-invokeai-dist
|
||||
```
|
||||
|
||||
#### `publish-pypi` Job
|
||||
|
||||
Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment.
|
||||
|
||||
## Publish the GitHub Release with installer
|
||||
|
||||
Once the release is published to PyPI, it's time to publish the GitHub release.
|
||||
|
||||
1. [Draft a new release] on GitHub, choosing the tag that triggered the release.
|
||||
1. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases.
|
||||
1. Use `scripts/get_external_contributions.py` to get a list of external contributions to shout out in the release notes.
|
||||
1. Upload the zip file created in the **`build-installer`** job into the Assets section of the release notes.
|
||||
1. Check **Set as a pre-release** if it's a pre-release.
|
||||
1. Check **Create a discussion for this release**.
|
||||
1. Publish the release.
|
||||
1. Announce the release in Discord.
|
||||
|
||||
> **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up.
|
||||
|
||||
## Manual Build
|
||||
|
||||
The `build installer` workflow can be dispatched manually. This is useful to test the installer for a given branch or tag.
|
||||
|
||||
No checks are run, it just builds.
|
||||
|
||||
## Manual Release
|
||||
|
||||
The `release` workflow can be dispatched manually. You must dispatch the workflow from the right tag, else it will fail the version check.
|
||||
|
||||
This functionality is available as a fallback in case something goes wonky. Typically, releases should be triggered via tag push as described above.
|
||||
|
||||
[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
[PyPI]: https://pypi.org/
|
||||
[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new
|
||||
[Test PyPI]: https://test.pypi.org/
|
||||
[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/
|
||||
[ncipollo/release-action]: https://github.com/ncipollo/release-action
|
||||
[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment
|
||||
[trusted publishers]: https://docs.pypi.org/trusted-publishers/
|
||||
[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version
|
||||
[manually]: #manual-release
|
||||
[python infrastructure status page]: https://status.python.org/
|
||||
|
Before Width: | Height: | Size: 284 KiB After Width: | Height: | Size: 284 KiB |
|
Before Width: | Height: | Size: 252 KiB After Width: | Height: | Size: 252 KiB |
|
Before Width: | Height: | Size: 359 KiB After Width: | Height: | Size: 359 KiB |
|
Before Width: | Height: | Size: 528 KiB After Width: | Height: | Size: 528 KiB |
|
Before Width: | Height: | Size: 601 KiB After Width: | Height: | Size: 601 KiB |
|
Before Width: | Height: | Size: 59 KiB After Width: | Height: | Size: 59 KiB |
|
Before Width: | Height: | Size: 142 KiB After Width: | Height: | Size: 142 KiB |
|
Before Width: | Height: | Size: 799 KiB After Width: | Height: | Size: 799 KiB |
|
Before Width: | Height: | Size: 122 KiB After Width: | Height: | Size: 122 KiB |
|
Before Width: | Height: | Size: 128 KiB After Width: | Height: | Size: 128 KiB |
|
Before Width: | Height: | Size: 99 KiB After Width: | Height: | Size: 99 KiB |
|
Before Width: | Height: | Size: 112 KiB After Width: | Height: | Size: 112 KiB |
|
Before Width: | Height: | Size: 107 KiB After Width: | Height: | Size: 107 KiB |
|
Before Width: | Height: | Size: 470 KiB After Width: | Height: | Size: 470 KiB |
|
Before Width: | Height: | Size: 457 KiB After Width: | Height: | Size: 457 KiB |
|
Before Width: | Height: | Size: 7.1 KiB After Width: | Height: | Size: 7.1 KiB |
|
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
|
Before Width: | Height: | Size: 415 KiB After Width: | Height: | Size: 415 KiB |
|
Before Width: | Height: | Size: 499 KiB After Width: | Height: | Size: 499 KiB |
|
Before Width: | Height: | Size: 536 KiB After Width: | Height: | Size: 536 KiB |
|
Before Width: | Height: | Size: 4.0 MiB After Width: | Height: | Size: 4.0 MiB |
|
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 46 KiB |
|
Before Width: | Height: | Size: 8.3 MiB After Width: | Height: | Size: 8.3 MiB |
|
Before Width: | Height: | Size: 23 KiB After Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 2.7 KiB After Width: | Height: | Size: 2.7 KiB |
|
Before Width: | Height: | Size: 30 KiB After Width: | Height: | Size: 30 KiB |
|
Before Width: | Height: | Size: 221 KiB After Width: | Height: | Size: 221 KiB |
|
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 53 KiB |
|
Before Width: | Height: | Size: 786 B After Width: | Height: | Size: 786 B |
|
Before Width: | Height: | Size: 27 KiB After Width: | Height: | Size: 27 KiB |
|
Before Width: | Height: | Size: 3.3 KiB After Width: | Height: | Size: 3.3 KiB |
|
Before Width: | Height: | Size: 270 KiB After Width: | Height: | Size: 270 KiB |
|
Before Width: | Height: | Size: 60 KiB After Width: | Height: | Size: 60 KiB |
|
Before Width: | Height: | Size: 184 KiB After Width: | Height: | Size: 184 KiB |