Compare commits

..

4 Commits

Author SHA1 Message Date
Lincoln Stein
0bbc83fe34 Bump up version to v2.3.0rc3
- Also changed the "latest" tag to "latest-2.3" to avoid interfering
  with the 2.2.5 update script, which fetches "latest"
2023-02-04 13:34:11 -05:00
Lincoln Stein
841e92e02a guesstimated disk space needed for "all" models 2023-02-04 13:32:34 -05:00
Lincoln Stein
4fbcdf2e28 adjust estimated file size reported by configure 2023-02-04 13:18:28 -05:00
Lincoln Stein
f744cf06e3 configuration script tidying up
- Rename configure_invokeai.py to invokeai_configure.py to be
  consistent with installed script name
- Remove warning message about half-precision models not being
  available during the model download process.
2023-02-04 12:00:17 -05:00
381 changed files with 5058 additions and 15016 deletions


@@ -1,21 +1,18 @@
# use this file as a whitelist
*
!backend
!invokeai
!ldm
!pyproject.toml
!README.md
!scripts
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# ignore frontend but whitelist dist
invokeai/frontend/**
!invokeai/frontend/dist
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets
!invokeai/assets/web
# whitelist frontend, but ignore node_modules
invokeai/frontend/node_modules
# ignore python cache
**/__pycache__

.github/CODEOWNERS

@@ -1,50 +1,7 @@
# continuous integration
/.github/workflows/ @mauwii
# documentation
/docs/ @lstein @mauwii @tildebyte
mkdocs.yml @lstein @mauwii
# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein
/installer/ @ebr @lstein @tildebyte
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @ebr
invokeai/configs @lstein @ebr
/ldm/invoke/_version.py @lstein @blessedcoolant
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious
# generation and model management
/ldm/*.py @lstein
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein
/ldm/invoke/ckpt_generator @lstein
/ldm/invoke/CLI.py @lstein
/ldm/invoke/config @lstein @ebr @mauwii
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein
/ldm/invoke/patchmatch.py @Kyle0654
/ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn
/ldm/modules @damian0815 @keturn
# Nodes
apps/ @Kyle0654
# legacy REST API
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb
/ldm/invoke/server_legacy.py @CapableWeb
/scripts/legacy_api.py @CapableWeb
/tests/legacy_tests.sh @CapableWeb
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker/ @mauwii


@@ -3,7 +3,6 @@ on:
push:
branches:
- 'main'
- 'update/ci/*'
tags:
- 'v*.*.*'
@@ -48,17 +47,16 @@ jobs:
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha,enable=true,prefix=sha-,format=short
type=sha
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
suffix=${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ matrix.platforms }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
@@ -69,7 +67,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build container
uses: docker/build-push-action@v4
uses: docker/build-push-action@v3
with:
context: .
file: ${{ matrix.dockerfile }}


@@ -1,41 +0,0 @@
name: PyPI Release
on:
push:
paths:
- 'ldm/invoke/_version.py'
workflow_dispatch:
jobs:
release:
if: github.repository == 'invoke-ai/InvokeAI'
runs-on: ubuntu-22.04
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
TWINE_NON_INTERACTIVE: 1
steps:
- name: checkout sources
uses: actions/checkout@v3
- name: install deps
run: pip install --upgrade build twine
- name: build package
run: python3 -m build
- name: check distribution
run: twine check dist/*
- name: check PyPI versions
if: github.ref == 'refs/heads/main'
run: |
pip install --upgrade requests
python -c "\
import scripts.pypi_helper; \
EXISTS=scripts.pypi_helper.local_on_pypi(); \
print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
- name: upload package
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
run: twine upload dist/*


@@ -8,11 +8,10 @@ on:
- 'ready_for_review'
- 'opened'
- 'synchronize'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
@@ -63,13 +62,28 @@ jobs:
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
PIP_USE_PEP517: '1'
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Set Cache-Directory Windows
if: runner.os == 'Windows'
id: set-cache-dir-windows
run: |
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
- name: Set Cache-Directory others
if: runner.os != 'Windows'
id: set-cache-dir-others
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
@@ -78,29 +92,26 @@ jobs:
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: pip
cache-dependency-path: pyproject.toml
- name: install invokeai
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
run: >
pip3 install
--use-pep517
--editable=".[test]"
- name: run pytest
id: run-pytest
run: pytest
- name: set INVOKEAI_OUTDIR
run: >
python -c
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
>> ${{ matrix.github-env }}
- name: Use Cached models
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-models
with:
path: ${{ env.CACHE_DIR }}
key: ${{ env.cache-name }}
enableCrossOsArchive: true
- name: run invokeai-configure
id: run-preload-models
@@ -113,8 +124,9 @@ jobs:
--full-precision
# can't use fp16 weights without a GPU
- name: run invokeai
id: run-invokeai
- name: Run the tests
if: runner.os != 'Windows'
id: run-tests
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
@@ -125,11 +137,10 @@ jobs:
--no-patchmatch
--no-nsfw_checker
--from_file ${{ env.TEST_PROMPTS }}
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
- name: Archive results
id: archive-results
uses: actions/upload-artifact@v3
with:
name: results
path: ${{ env.INVOKEAI_OUTDIR }}
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs


@@ -92,7 +92,6 @@ You will need one of the following:
- An NVIDIA-based graphics card with 4 GB or more of VRAM.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4 GB or more of VRAM. (Linux only)
We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM


@@ -1,23 +1,24 @@
# syntax=docker/dockerfile:1
# Maintained by Matthias Wild <mauwii@outlook.de>
ARG PYTHON_VERSION=3.9
##################
## base image ##
### base image ###
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
# Install necessary packages
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# set working directory and path
@@ -26,61 +27,86 @@ ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1
######################
### build frontend ###
######################
FROM node:lts as frontend-builder
# prepare for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}
# Copy Sources
ARG APPDIR=/usr/src
WORKDIR ${APPDIR}
COPY --link . .
# install dependencies and build frontend
WORKDIR ${APPDIR}/invokeai/frontend
RUN \
--mount=type=cache,target=/usr/local/share/.cache/yarn/v6 \
yarn install \
--prefer-offline \
--frozen-lockfile \
--non-interactive \
--production=false \
&& yarn build
###################################
### install python dependencies ###
###################################
FROM python-base AS pyproject-builder
# Install dependencies
RUN \
--mount=type=cache,target=${PIP_CACHE_DIR} \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
python3 -m venv "${APPNAME}" \
RUN python3 -m venv "${APPNAME}" \
--upgrade-deps
# copy sources
COPY --link . .
COPY --from=frontend-builder ${APPDIR} .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
# build patchmatch
RUN python3 -c "from patchmatch import patch_match"
RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
"${APPDIR}/${APPNAME}/bin/pip" install \
--use-pep517 \
.
#####################
## runtime image ##
### runtime image ###
#####################
FROM python-base AS runtime
# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# build patchmatch
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
&& PYTHONDONTWRITEBYTECODE=1 \
python3 -c "from patchmatch import patch_match" \
&& apt-get remove -y \
--autoremove \
build-essential \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/*
# set Entrypoint and default CMD
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
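As a rough usage sketch (the image tag and volume name below are illustrative, and the `docker/Dockerfile` path is assumed from the build script later in this diff; the `ENTRYPOINT`/`CMD` above start the web server on all interfaces):

```bash
# BuildKit is required for the --mount=type=cache instructions above
DOCKER_BUILDKIT=1 docker build -t invokeai:local -f docker/Dockerfile .

# Persist models and outputs in a named volume; the web UI defaults to port 9090
docker run --rm -it -p 9090:9090 -v invokeai-data:/data invokeai:local
```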


@@ -17,14 +17,14 @@ DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
echo -e "Dockerfile: \t${DOCKERFILE}"
echo -e "index-url: \t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename: \t${VOLUMENAME}"
echo -e "Platform: \t${PLATFORM}"
echo -e "Registry: \t${CONTAINER_REGISTRY}"
echo -e "Repository: \t${CONTAINER_REPOSITORY}"
echo -e "Container Tag: \t${CONTAINER_TAG}"
echo -e "Container Image: ${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
@@ -35,10 +35,9 @@ else
fi
# Build Container
DOCKER_BUILDKIT=1 docker build \
docker build \
--platform="${PLATFORM}" \
--tag="${CONTAINER_IMAGE}" \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
..


@@ -2,12 +2,12 @@
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
if [[ -z "$CONTAINER_FLAVOR" ]]; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR=cuda
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
@@ -16,11 +16,9 @@ if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/rocm"}"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/cpu"}"
fi
fi
@@ -32,7 +30,6 @@ PLATFORM="${PLATFORM-Linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
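A hedged usage sketch, assuming `build.sh` sources this `env.sh` from the `docker/` directory: the `-` parameter-expansion defaults above mean any of these variables can be overridden from the calling environment:

```bash
# Force the ROCm flavor instead of letting the script autodetect via torch
CONTAINER_FLAVOR=rocm ./docker/build.sh

# Pin the registry/repository, which feeds into CONTAINER_IMAGE
CONTAINER_REGISTRY=ghcr.io CONTAINER_REPOSITORY=myorg/invokeai ./docker/build.sh
```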


@@ -4,7 +4,7 @@ title: Changelog
# :octicons-log-16: **Changelog**
## v2.3.0 <small>(15 January 2023)</small>
**Transition to diffusers**
@@ -44,7 +44,7 @@ introduces several changes you should know about.
A configuration stanza for a diffusers model stored locally should
look like this, with a `format` of `diffusers`, but a `path` field
that points at the directory that contains `model_index.json`:
```
waifu-diffusion:
  description: Latest waifu diffusion 1.4
  format: diffusers
  path: models/diffusers/waifu-diffusion-1.4  # illustrative path; must contain model_index.json
@@ -94,7 +94,7 @@ introduces several changes you should know about.
!import_model /opt/sd-models/sd-1.4.ckpt
!import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```
**KNOWN BUGS (15 January 2023)**
1. On CUDA systems, the 768 pixel stable-diffusion-2.0 and
@@ -261,7 +261,7 @@ sections describe what's new for InvokeAI.
[Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
PIP-only installs. See
[Manual Installation](installation/020_INSTALL_MANUAL.md).
[Manual Installation](installation/INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
sampler, etc) in a `.invokeai` file. See
[Client](features/CLI.md)

[8 binary image files (docs assets) deleted, not shown; sizes 26, 20, 37, 57, 98, 94, 99 and 98 KiB]


@@ -6,51 +6,38 @@ title: Command-Line Interface
## **Interactive Command Line Interface**
The InvokeAI command line interface (CLI) provides scriptable access
to InvokeAI's features. Some advanced features are only available
through the CLI, though they eventually find their way into the WebUI.
The `invoke.py` script, located in `scripts/`, provides an interactive interface
to image generation similar to the "invoke mothership" bot that Stable AI
provided on its Discord server.
The CLI is accessible from the `invoke.sh`/`invoke.bat` launcher by
selecting option (1). Alternatively, it can be launched directly from
the command line by activating the InvokeAI environment and giving the
command:
```bash
invokeai
```
After some startup messages, you will be presented with the `invoke> `
prompt. Here you can type prompts to generate images and issue other
commands to load and manipulate generative models. The CLI has a large
number of command-line options that control its behavior. To get a
concise summary of the options, call `invokeai` with the `--help` argument:
```bash
invokeai --help
```
Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) source
code repository, the time-consuming initialization of the AI model
happens only once. After that, image generation from the
command-line interface is very fast.
The script uses the readline library to allow for in-line editing, command
history (++up++ and ++down++), autocompletion, and more. To help keep track of
which prompts generated which images, the script writes a log file of image
names and prompts to the selected output directory.
Here is a typical session
In addition, as of version 1.02, it also writes the prompt into the PNG file's
metadata where it can be retrieved using `scripts/images2prompt.py`
The script is confirmed to work on Linux, Windows and Mac systems.
!!! note
This script runs from the command-line or can be used as a Web application. The Web GUI is
currently rudimentary, but a much better replacement is on its way.
```bash
PS1:C:\Users\fred> invokeai
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
* Initializing, be patient...
>> Initialization file /home/lstein/invokeai/invokeai.init found. Loading...
>> Internet connectivity is True
>> InvokeAI, version 2.3.0-rc5
>> InvokeAI runtime directory is "/home/lstein/invokeai"
>> GFPGAN Initialized
>> CodeFormer Initialized
>> ESRGAN Initialized
>> Using device_type cuda
>> xformers memory-efficient attention is available and enabled
(...more initialization messages...)
* Initialization done! Awaiting your command (-h for help, 'q' to quit)
Loading model from models/ldm/text2img-large/model.ckpt
(...more initialization messages...)
* Initialization done! Awaiting your command...
invoke> ashley judd riding a camel -n2 -s150
Outputs:
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
@@ -60,15 +47,27 @@ invoke> "there's a fly in my soup" -n6 -g
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
invoke> q
# this shows how to retrieve the prompt stored in the saved image's metadata
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
```
![invoke-py-demo](../assets/dream-py-demo.png)
The `invoke>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you
do). A significant change is that creation of individual images is now the
default unless `--grid` (`-g`) is given. A full list is given in
[List of prompt arguments](#list-of-prompt-arguments).
## Arguments
The script recognizes a series of command-line switches that will
change important global defaults, such as the directory for image
outputs and the location of the model weight files.
The script itself also recognizes a series of command-line switches that will
change important global defaults, such as the directory for image outputs and
the location of the model weight files.
### List of arguments recognized at the command line
@@ -83,14 +82,10 @@ overridden on a per-prompt basis (see
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
| `--model <modelname>` | | `stable-diffusion-1.5` | Loads the initial model specified in configs/models.yaml. |
| `--ckpt_convert` | | `False` | If provided, both .ckpt and .safetensors files will be auto-converted into diffusers format in memory |
| `--autoconvert <path>` | | `None` | On startup, scan the indicated directory for new .ckpt/.safetensor files and automatically convert and import them |
| `--precision` | | `fp16` | Provide `fp32` for full precision mode, `fp16` for half-precision. `fp32` needed for Macintoshes and some NVidia cards. |
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
| `--patchmatch`, `--no-patchmatch` | | `--patchmatch` | Load/Don't load the PatchMatch inpainting extension |
| `--xformers`, `--no-xformers` | | `--xformers` | Load/Don't load the Xformers memory-efficient attention module (CUDA only) |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
@@ -114,7 +109,6 @@ overridden on a per-prompt basis (see
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--full_precision` | | `False` | Same as `--precision=fp32`|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
@@ -342,10 +336,8 @@ useful for debugging the text masking process prior to inpainting with the
### Model selection and importation
The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script. There are several
different model formats, each described in the [Model Installation
Guide](../installation/050_INSTALLING_MODELS.md).
The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.
#### `!models`
@@ -355,9 +347,9 @@ model is bold-faced
Example:
<pre>
inpainting-1.5 not loaded Stable Diffusion inpainting model
<b>stable-diffusion-1.5 active Stable Diffusion v1.5</b>
waifu-diffusion not loaded Waifu Diffusion v1.4
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>
#### `!switch <model>`
@@ -369,30 +361,43 @@ Note how the second column of the `!models` table changes to `cached` after a
model is first loaded, and that the long initialization step is not needed when
loading a cached model.
#### `!import_model <hugging_face_repo_ID>`
<pre>
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
waifu-diffusion active Waifu Diffusion v1.3
This imports and installs a `diffusers`-style model that is stored on
the [HuggingFace Web Site](https://huggingface.co). You can look up
any [Stable Diffusion diffusers
model](https://huggingface.co/models?library=diffusers) and install it
with a command like the following:
invoke> !switch waifu-diffusion
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Setting Sampler to k_lms
```bash
!import_model prompthero/openjourney
```
invoke> !models
laion400m not loaded <no description>
stable-diffusion-1.4 cached Stable Diffusion v1.4
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
#### `!import_model <path/to/diffusers/directory>`
invoke> !switch stable-diffusion-1.4
>> Caching model waifu-diffusion in system RAM
>> Retrieving model stable-diffusion-1.4 from system RAM cache
>> Setting Sampler to k_lms
If you have a copy of a `diffusers`-style model saved to disk, you can
import it by passing the path to the model's top-level directory, as in
the sketch below.
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>
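A minimal sketch of a local directory import at the CLI prompt (the path is illustrative; the directory must contain `model_index.json`):

```bash
invoke> !import_model /opt/sd-models/waifu-diffusion-1.4
```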
#### `!import_model <url>`
For a `.ckpt` or `.safetensors` file, if you have a direct download
URL for the file, you can provide it to `!import_model` and the file
will be downloaded and installed for you.
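For example (this PaperCut checkpoint URL also appears in the changelog above):

```bash
invoke> !import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```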
#### `!import_model <path/to/model/weights.ckpt>`
#### `!import_model <path/to/model/weights>`
This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
@@ -412,12 +417,35 @@ below, the bold-faced text shows what the user typed in with the exception of
the width, height and configuration file paths, which were filled in
automatically.
#### `!import_model <path/to/directory_of_models>`
Example:
If you provide the path of a directory that contains one or more
`.ckpt` or `.safetensors` files, the CLI will scan the directory and
interactively offer to import the models it finds there. Also see the
`--autoconvert` command-line option.
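A sketch of a directory scan (path illustrative; the CLI offers each discovered model for interactive import):

```bash
invoke> !import_model /opt/sd-models
```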
<pre>
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:
Name for this model: <b>waifu-diffusion</b>
Description of this model: <b>Waifu Diffusion v1.3</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
waifu-diffusion:
config: configs/stable-diffusion/v1-inference.yaml
description: Waifu Diffusion v1.3
height: 512
weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
</pre>
#### `!edit_model <name_of_model>`
@@ -451,6 +479,11 @@ OK to import [n]? y
...
</pre>
```
invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
...lots of text...
Outputs:
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
```
### History processing
The CLI provides a series of convenient commands for reviewing previous actions,


@@ -4,24 +4,13 @@ title: Image-to-Image
# :material-image-multiple: Image-to-Image
Both the Web and command-line interfaces provide an "img2img" feature
that lets you seed your creations with an initial drawing or
photo. This is a really cool feature that tells stable diffusion to
build the prompt on top of the image you provide, preserving the
original's basic shape and layout.
## `img2img`
See the [WebUI Guide](WEB.md) for a walkthrough of the img2img feature
in the InvokeAI web server. This document describes how to use img2img
in the command-line tool.
## Basic Usage
Launch the command-line client by launching `invoke.sh`/`invoke.bat`
and choosing option (1). Alternatively, activate the InvokeAI
environment and issue the command `invokeai`.
Once the `invoke> ` prompt appears, you can start an img2img render by
pointing to a seed file with the `-I` option as shown here:
This script also provides an `img2img` feature that lets you seed your creations
with an initial drawing or photo. This is a really cool feature that tells
stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:
!!! example ""
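A minimal img2img sketch at the `invoke>` prompt (file name and settings are illustrative):

```bash
invoke> "waterfall and rainbow" -I./init-image.png -s100 -n2
```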


@@ -54,7 +54,8 @@ Please enter 1, 2, 3, or 4: [1] 3
```
From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command `invokeai-ti --gui`.
you can launch the front end with the command `textual_inversion
--gui`.
This will launch a text-based front end that will look like this:
@@ -226,12 +227,12 @@ It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:
```sh
invokeai-ti --help
textual_inversion --help
```
Typical usage is shown here:
```sh
invokeai-ti \
textual_inversion \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=style \
@@ -266,4 +267,4 @@ resources:
---
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team


@@ -5,14 +5,11 @@ title: InvokeAI Web Server
# :material-web: InvokeAI Web Server
As of version 2.0.0, this distribution comes with a full-featured web server
(see screenshot).
To use it, launch the `invoke.sh`/`invoke.bat` script and select
option (2). Alternatively, with the InvokeAI environment active, run
the `invokeai` script by adding the `--web` option:
(see screenshot). To use it, run the `invoke.py` script by adding the `--web`
option:
```bash
invokeai --web
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
```
You can then connect to the server by pointing your web browser at
@@ -22,23 +19,17 @@ address of the host you are running it on, or the wildcard `0.0.0.0`. For
example:
```bash
invoke.sh --host 0.0.0.0
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
```
or
## Quick guided walkthrough of the WebGUI's features
```bash
invokeai --web --host 0.0.0.0
```
## Quick guided walkthrough of the WebUI's features
While most of the WebUI's features are intuitive, here is a guided walkthrough
While most of the WebGUI's features are intuitive, here is a guided walkthrough
through its various components.
![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}
The screenshot above shows the Text to Image tab of the WebUI. There are three
The screenshot above shows the Text to Image tab of the WebGUI. There are three
main sections:
1. A **control panel** on the left, which contains various settings for text to
@@ -72,14 +63,12 @@ From top to bottom, these are:
1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph)
generate a new one, modified by the text prompt
3. Unified Canvas - Interactively combine multiple images, extend them
with outpainting, and modify interior portions of the image with
inpainting, erase portions of a starting image and have the AI fill in
the erased region from a text prompt.
4. Workflow Management (not yet implemented) - this panel will allow you to create
pipelines of common operations and combine them into workflows.
5. Training (not yet implemented) - this panel will provide an interface to [textual
inversion training](TEXTUAL_INVERSION.md) and fine tuning.
3. Inpainting (pending) - Interactively erase portions of a starting image and
have the AI fill in the erased region from a text prompt.
4. Outpainting (pending) - Interactively add blank space to the borders of a
starting image and fill in the background from a text prompt.
5. Postprocessing (pending) - Interactively postprocess generated images using a
variety of filters.
The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already be accessed
@@ -87,18 +76,18 @@ through the Text to Image and Image to Image tabs.
## Walkthrough
The following walkthrough will exercise most (but not all) of the WebUI's
The following walkthrough will exercise most (but not all) of the WebGUI's
feature set.
### Text to Image
1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
1. Launch the WebGUI using `python scripts/invoke.py --web` and connect to it
with your browser by accessing `http://localhost:9090`. If the browser and
server are running on different machines on your LAN, add the option
`--host 0.0.0.0` to the launch command line and connect to the machine
hosting the web server using its IP address or domain name.
2. If all goes well, the WebUI should come up and you'll see a green
2. If all goes well, the WebGUI should come up and you'll see a green
`connected` message on the upper right.
#### Basics
@@ -245,7 +234,7 @@ walkthrough.
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
the blank area to get an upload dialog. The image will load into an area
marked _Initial Image_. (The WebUI will also load the most
marked _Initial Image_. (The WebGUI will also load the most
recently-generated image from the gallery into a section on the left, but
this image will be replaced in the next step.)
@@ -295,17 +284,13 @@ initial image" icons are located.
![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}
### Unified Canvas
See the [Unified Canvas Guide](UNIFIED_CANVAS.md)
## Parting remarks
This concludes the walkthrough, but there are several more features that you can
explore. Please check out the [Command Line Interface](CLI.md) documentation for
further explanation of the advanced features that were not covered here.
The WebUI is under rapid development. Check back regularly for updates!
The WebGUI is under rapid development. Check back regularly for updates!
## Reference


@@ -2,62 +2,4 @@
title: Overview
---
Here you can find the documentation for InvokeAI's various features.
## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.
### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
### * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
### * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.
### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.
### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.
## Model Management
### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.
### * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
### * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
## Other Features
### * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
### * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!
Here you can find the documentation for different features.


@@ -81,6 +81,28 @@ Q&A</a>]
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster.
## :octicons-package-dependencies-24: Installation
This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
First time users, please see
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative installation and
upgrade instructions, please see:
[InvokeAI Installation Overview](installation/)
Users who wish to make use of the **PyPatchMatch** inpainting functions
will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing
PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
If you have an NVIDIA card, you can benefit from the significant
memory savings and performance benefits provided by Facebook Lab's
**xFormers** module. Instructions for Linux and Windows users can be found
at [Installing xFormers](installation/070_INSTALL_XFORMERS.md).
## :fontawesome-solid-computer: Hardware Requirements
### :octicons-cpu-24: System
@@ -100,146 +122,141 @@ images in full-precision mode:
- GTX 1650 series cards
- GTX 1660 series cards
### :fontawesome-solid-memory: Memory and Disk
### :fontawesome-solid-memory: Memory
- At least 12 GB Main Memory RAM.
### :fontawesome-regular-hard-drive: Disk
- At least 18 GB of free disk space for the machine learning model, Python, and
all its dependencies.
## :octicons-package-dependencies-24: Installation
!!! info
This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
Precision is auto-configured based on the device. If, however, you encounter errors like
`expected type Float but found Half` or `not implemented for Half` you can try starting
`invoke.py` with the `--precision=float32` flag:
### [Installation Getting Started Guide](installation)
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
This method is recommended for 1st time users
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
This method is recommended for experienced users and developers
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
This method is recommended for those familiar with running Docker containers
### Other Installation Guides
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```
## :octicons-gift-24: InvokeAI Features
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
### The InvokeAI Command Line Interface
- [Command Line Interface Reference Guide](features/CLI.md)
<!-- separator -->
### Image Management
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)
<!-- separator -->
### Model Management
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
- [The InvokeAI Web Interface](features/WEB.md) -
[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md) -
[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
[Outpainting](features/OUTPAINTING.md) -
[Adding custom styles and subjects](features/CONCEPTS.md) -
[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
<!-- separator -->
### Prompt Engineering
- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
<!-- separator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- [Model Merging](features/MODEL_MERGING.md)
<!-- separator -->
- Miscellaneous
- [NSFW Checker](features/NSFW.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other](features/OTHER.md)
## :octicons-log-16: Latest Changes
### v2.3.0 <small>(9 February 2023)</small>
### v2.2.4 <small>(11 December 2022)</small>
#### Migration to Stable Diffusion `diffusers` models
#### the `invokeai` directory
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models.

A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
Previously there were two directories to worry about, the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the models files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
live in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
##### Initialization file `invokeai/invokeai.init`
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
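For instance, hedged examples of these commands at the `invoke>` prompt (paths and model names reuse examples from elsewhere in these notes):

```bash
invoke> !convert_model /opt/sd-models/sd-1.4.ckpt
invoke> !optimize_model waifu-diffusion
invoke> !import_model prompthero/openjourney
```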
You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.
The WebGUI offers similar functionality for model management.
#### To update from Version 2.2.3
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
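A sketch of both launch options (the directory path is illustrative):

```bash
# Scan a directory at every startup, converting and importing new checkpoints
invokeai --autoconvert /opt/sd-models

# Convert registered checkpoint/safetensors models to diffusers in memory on load
invokeai --ckpt_convert
```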
The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.
#### Support for the `XFormers` Memory-Efficient Crossattention Package
#### To update to 2.2.5 (and beyond), there's now an update path.
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the`xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion models files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can install specific releases by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
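For example, either form disables `xformers` (the init-file path assumes the default runtime directory in your home directory):

```bash
# One-off, from the command line
invokeai --no-xformers

# Or persistently, by appending the flag to the init file
echo "--no-xformers" >> ~/invokeai/invokeai.init
```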
#### Other 2.2.4 Improvements
#### A Negative Prompt Box in the WebUI
- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in
#1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
in #1765
- stability and usage improvements to binary & source installers by @lstein in
#1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
#1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in
#1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in
#1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
@SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein
There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
#### Model Merging
Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
#### Textual Inversion Training
Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, the subject or style will be activated by including `<pointillist-style>` in your prompt.
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
#### A New Installer Experience
The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
#### Command-line name changes
All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can all be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
* `invokeai` -- Command-line client
* `invokeai --web` -- Web GUI
* `invokeai-merge --gui` -- Model merging script with graphical front end
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
Developers should be aware that the locations of the scripts' source code have been moved. The new locations are:
* `invokeai` => `ldm/invoke/CLI.py`
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
* `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting
Please check out our **[:material-frequently-asked-questions:
Troubleshooting
Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to
get solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing
@@ -265,8 +282,8 @@ thank them for their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.
Original portions of the software are Copyright (c) 2022-23
by [The InvokeAI Team](https://github.com/invoke-ai).
## :octicons-book-24: Further Reading

View File

@@ -6,106 +6,57 @@ title: Installing with the Automated Installer
## Introduction
The automated installer is a Python script that automates the steps
needed to install and run InvokeAI on a stock computer running recent
versions of Linux, MacOS or Windows. It will leave you with a version
that runs a stable version of InvokeAI with the option to upgrade to
experimental versions later.
## Walk through
1. <a name="hardware_requirements">**Hardware Requirements**: </a>Make sure that your system meets the [hardware
requirements](../index.md#hardware-requirements) and has the
appropriate GPU drivers installed. For a system with an NVIDIA
card installed, you will need to install the CUDA driver, while
AMD-based cards require the ROCm driver. In most cases, if you've
already used the system for gaming or other graphics-intensive
tasks, the appropriate drivers will already be installed. If
unsure, check the [GPU Driver Guide](030_INSTALL_CUDA_AND_ROCM.md)
!!! info "Required Space"
Installation requires roughly 18G of free disk space to load
the libraries and recommended model weights files.

Regardless of your destination disk, your *system drive*
(`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB
of free disk space to download and cache python dependencies.

NOTE for Linux users: if your temporary directory is mounted
as a `tmpfs`, ensure it has sufficient space.
2. <a name="software_requirements">**Software Requirements**: </a>Check that your system has an up-to-date Python installed. To do
this, open up a command-line window ("Terminal" on Linux and
Macintosh, "Command" or "Powershell" on Windows) and type `python
--version`. If Python is installed, it will print out the version
number. If it is version `3.9.1` or `3.10.x`, you meet
requirements.
!!! warning "At this time we do not recommend Python 3.11"
!!! warning "If you see an older version, or get a command not found error"
!!! warning "What to do if you have an unsupported version"
Go to [Python Downloads](https://www.python.org/downloads/) and
download the appropriate installer package for your platform. We recommend
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI.
Go to [Python Downloads](https://www.python.org/downloads/)
and download the appropriate installer package for your
platform. We recommend [Version
3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI. At this time
we do not recommend Python 3.11.
_Please select your platform in the section below for platform-specific
setup requirements._
=== "Windows"
During the Python configuration process, look out for a
checkbox to add Python to your PATH and select it. If the
install script complains that it can't find python, then open
the Python installer again and choose "Modify" existing
installation.
=== "Windows users"
Installation requires an up to date version of the Microsoft
Visual C libraries. Please install the 2015-2022 libraries
available here:
https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
Please double-click on the file `WinLongPathsEnabled.reg` and
accept the dialog box that asks you if you wish to modify your registry.
This activates long filename support on your system and will prevent
mysterious errors during installation.
=== "Linux"
To install an appropriate version of Python on Ubuntu 22.04
and higher, run the following:
=== "Mac users"
```
sudo apt update
sudo apt install -y python3 python3-pip python3-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
On Ubuntu 20.04, the process is slightly different:
```
sudo apt update
sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt install python3.10 python3-pip python3.10-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
Both `python` and `python3` commands are now pointing at
Python3.10. You can still access older versions of Python by
calling `python2`, `python3.8`, etc.
Linux systems require a couple of additional graphics
libraries to be installed for proper functioning of
`python3-opencv`. Please run the following:
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
=== "Mac"
After installing Python, you may need to run the
following command from the Terminal in order to install the Web
certificates needed to download model data from https sites. If
you see lots of CERTIFICATE ERRORS during the last part of the
@@ -113,55 +64,97 @@ experimental versions later.
`/Applications/Python\ 3.10/Install\ Certificates.command`
You may need to install the Xcode command line tools. These
are a set of tools that are needed to run certain applications in a
Terminal, including InvokeAI. This package is provided
directly by Apple. To install, open a terminal window and run `xcode-select --install`. You will get a macOS system popup guiding you through the
install. If you already have them installed, you will instead see some
output in the Terminal advising you that the tools are already installed. More information can be found at [FreeCode Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
3. **Download the Installer**: The InvokeAI installer is distributed as a ZIP file. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
and look for a file named:
- InvokeAI-installer-v2.X.X.zip
where "2.X.X" is the latest released version. The file is located
at the very bottom of the release page, under **Assets**.
=== "Linux users"
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
directory named "InvokeAI-Installer". When unpacked, the directory
will look like this:
<figure markdown>
![zipfile-screenshot](../assets/installer-walkthrough/unpacked-zipfile.png)
</figure>
5. **Launch the installer script from the desktop**: If you are using a desktop GUI, double-click the installer file
appropriate for your platform. It will be named `install.bat` on
Windows systems and `install.sh` on Linux and Macintosh
systems. Be aware that your system's file browser may suppress the
display of the file extension.
On Windows systems, if you get an "Untrusted Publisher" warning,
click on "More Info" and then select "Run Anyway." You trust us, right?
6. **[Alternative] Launch the installer script from the command line**: Alternatively, from the command line, run the shell script or .bat file:
```cmd
C:\Documents\Linco> cd InvokeAI-Installer
C:\Documents\Linco\invokeAI> install.bat
```

After successful installation, you can delete the `InvokeAI-Installer`
directory.
7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
directory with at least 18G of free space for a full install. InvokeAI and
all its support files will be installed into a new directory named
`invokeai` located at the location you specify.
<figure markdown>
![confirm-install-directory-screenshot](../assets/installer-walkthrough/confirm-directory.png)
</figure>
- The default is to install the `invokeai` directory in your home directory,
usually `C:\Users\YourName\invokeai` on Windows systems,
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
@@ -171,23 +164,9 @@ experimental versions later.
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
to suggest completions.
8. **Select your GPU**: The installer will autodetect your platform and will request you to
confirm the type of GPU your graphics card has. On Linux systems,
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
or CPU (no graphics acceleration). On Windows, you'll have the
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
you select CPU on M1 or M2 Macintoshes, you will get MPS-based
graphics acceleration without installing additional drivers. If you
are unsure what GPU you are using, you can ask the installer to
guess.
<figure markdown>
![choose-gpu-screenshot](../assets/installer-walkthrough/choose-gpu.png)
</figure>
9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI and the application itself.
Be aware that some of the library download and install steps take a long
time. In particular, the `pytorch` package is quite large and often appears
@@ -197,25 +176,25 @@ experimental versions later.
minutes and nothing is happening, you can interrupt the script with ^C. You
may restart it and it will pick up where it left off.
10. **Post-install Configuration**: After installation completes, the installer will launch the
configuration script, which will guide you through the first-time
process of selecting one or more Stable Diffusion model weights
files, downloading and configuring them. We provide a list of
popular models that InvokeAI performs well with. However, you can
add more weight files later on using the command-line client or
the Web UI. See [Installing Models](050_INSTALLING_MODELS.md) for
details.
<figure markdown>
![downloading-models-screenshot](../assets/installer-walkthrough/downloading-models.png)
</figure>
Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
11. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
for the directory `invokeai` installed in the location you chose at the
beginning of the install session. Look for a shell script named `invoke.sh`
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
@@ -226,83 +205,49 @@ experimental versions later.
C:\Documents\Linco\invokeAI> invoke.bat
```
- The `invoke.bat` (`invoke.sh`) script will give you the choice
of starting (1) the command-line interface, (2) the web GUI, (3)
textual inversion training, and (4) model merging.
- By default, the script will launch the web interface. When you
do this, you'll see a series of startup messages ending with
instructions to point your browser at
http://localhost:9090. Click on this link to open up a browser
and start exploring InvokeAI's features.
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
customize its behavior. For example, you can change the location of the
image output directory, or select your favorite sampler. See the
[Command-Line Interface](../features/CLI.md) for a full list of the options.
- To set defaults that will take effect every time you launch InvokeAI,
use a text editor (e.g. Notepad) to edit the file
`invokeai\invokeai.init`. It contains a variety of examples that you can
follow to add and modify launch options.
- The launcher script also offers you an option labeled "open the developer
console". If you choose this option, you will be dropped into a
command-line interface in which you can run python commands directly,
access developer tools, and launch InvokeAI with customized options.
!!! warning "Do not move or remove the `invokeai` directory"
The `invokeai` directory contains the `invokeai` application, its
configuration files, the model weight files, and outputs of image generation.
Once InvokeAI is installed, do not move or remove this directory.
## Troubleshooting
### _Package dependency conflicts_
If you have previously installed InvokeAI or another Stable Diffusion
package, the installer may occasionally pick up outdated libraries and
either the installer or `invoke` will fail with complaints about
library conflicts. In this case, run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then give this command:

`pip install InvokeAI --force-reinstall`

This should fix the issues.
### InvokeAI runs extremely slowly on Linux or Windows systems
The most frequent cause of this problem is when the installation
process installed the CPU-only version of the torch machine-learning
library, rather than a version that takes advantage of GPU
acceleration. To confirm this issue, look at the InvokeAI startup
messages. If you see a message saying ">> Using device CPU", then
this is what happened.
To fix this problem, first determine whether you have an NVidia or an
AMD GPU. The former uses the CUDA driver, and the latter uses ROCm
(only available on Linux). Then run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then type the following commands:
=== "NVIDIA System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
pip install xformers
```
=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
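After reinstalling, you can quickly confirm that torch now detects your GPU. This check is not part of the official instructions, but it is a handy sanity test:

```bash
python -c "import torch; print(torch.cuda.is_available())"
```

If this prints `True`, GPU acceleration is available and the ">> Using device CPU" message should no longer appear.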
### ldm from pypi

!!! warning

Some users have tried to correct dependency problems by installing
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
ldm will make matters worse. If you've installed ldm, uninstall it with
`pip uninstall ldm`.
### Corrupted configuration file
@@ -327,7 +272,7 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.
### Other Problems
If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
@@ -339,34 +284,36 @@ hours, and often much sooner.
## Updating to newer versions
This distribution is changing rapidly, and we add new features
regularly. Releases are announced at
http://github.com/invoke-ai/InvokeAI/releases, and at
https://pypi.org/project/InvokeAI/. To update to the latest released
version (recommended), follow these steps:
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
`invokeai` root directory.
2. Choose menu item (6) "Developer's Console". This will launch a new
command line.
3. Type the following command:
```bash
pip install InvokeAI --upgrade
```
4. Watch the installation run. Once it is complete, you may exit the
command line by typing `exit`, and then start InvokeAI from the
launch script as per usual.
Alternatively, if you wish to get the most recent unreleased
development version, perform the same steps to enter the developer's
console, and then type:
```bash
pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
```
You can get the list of version numbers by going to the [releases
page](https://github.com/invoke-ai/InvokeAI/releases) or by browsing
the [Tags](https://github.com/invoke-ai/InvokeAI/tags) list from the
Code section of the main github page.

View File

@@ -14,46 +14,17 @@ title: Installing Manually
## Introduction
!!! tip "Conda"
As of InvokeAI v2.3.0 installation using the `conda` package manager is no longer being supported. It will likely still work, but we are not testing this installation method.
On Windows systems, you are encouraged to install and use the
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.
### Prerequisites
Before you start, make sure you have the following prerequisites
installed. These are described in more detail in [Automated
Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming):
* **Python** version 3.9 or 3.10 (3.11 is not recommended).
* **CUDA Tools** For those with _NVidia GPUs_, you will need to
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
InvokeAI does not support AMD GPUs on Windows systems due to
lack of a Windows ROCm library.
* **Visual C++ Libraries** _Windows users_ must install the free
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
* **The Xcode command line tools** for _Macintosh users_. Instructions are
available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
* _Macintosh users_ may also need to run the `Install Certificates` command
if model downloads give lots of certificate errors. Run:
`/Applications/Python\ 3.10/Install\ Certificates.command`
### Installation Walkthrough
To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
procedure depends on this and will not work with other versions:
@@ -62,125 +33,74 @@ manager, please follow these steps:
python -V
```
2. Create a directory to contain your InvokeAI library, configuration
files, and models. This is known as the "runtime" or "root"
directory, and often lives in your home directory under the name `invokeai`.
Please keep in mind the disk space requirements - you will need at
least 20GB for the models and the virtual environment. From now
on we will refer to this directory as `INVOKEAI_ROOT`. For convenience,
the steps below create a shell variable of that name which contains the
path to `HOME/invokeai`.
=== "Linux/Mac"
```bash
export INVOKEAI_ROOT="~/invokeai"
mkdir $INVOKEAI_ROOT
```
=== "Windows (Powershell)"
```bash
Set-Variable -Name INVOKEAI_ROOT -Value $Home/invokeai
mkdir $INVOKEAI_ROOT
```
3. Enter the root (invokeai) directory and create a virtual Python
environment within it named `.venv`. If the command `python`
doesn't work, try `python3`. Note that while you may create the
virtual environment anywhere in the file system, we recommend that
you create it within the root directory as shown here. This makes
it possible for the InvokeAI applications to find the model data
and configuration. If you do not choose to install the virtual
environment inside the root directory, then you **must** set the
`INVOKEAI_ROOT` environment variable in your shell environment, for
example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.
=== "Linux/Mac"
```bash
cd $INVOKEAI_ROOT
python -m venv .venv
```
=== "Windows"
```bash
cd $INVOKEAI_ROOT
python -m venv .venv
```
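If you do choose a virtual environment location outside the root directory, one way to satisfy the `INVOKEAI_ROOT` requirement described above is a persistent shell setting; a minimal sketch for bash/zsh users (the path is illustrative):

```bash
# add to ~/.bashrc or ~/.zshrc; adjust to your actual root directory
export INVOKEAI_ROOT="$HOME/invokeai"
```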
4. Activate the new environment:
=== "Linux/Mac"
```bash
source .venv/bin/activate
```
=== "Windows"
```bash
.venv\Scripts\activate
```
If you get a permissions error at this point, run the command
`Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
and try `activate` again.
The command-line prompt should change to show `(.venv)` at the
beginning of the prompt. Note that all the following steps should be
run while inside the INVOKEAI_ROOT directory.
5. Make sure that pip is installed in your virtual environment and up to date:
```bash
python -m pip install --upgrade pip
```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
=== "CUDA (NVidia)"
```bash
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
become available in the environment
=== "Linux/Macintosh"
```bash
deactivate && source .venv/bin/activate
```
=== "Windows"
```bash
deactivate
.venv\Scripts\activate
```
8. Set up the runtime directory
In this step you will initialize your runtime directory with the downloaded
models, model config files, directory for textual inversion embeddings, and
your outputs.
```bash
invokeai-configure
```
The script `invokeai-configure` will interactively guide you through the
@@ -199,36 +119,35 @@ manager, please follow these steps:
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
9. Run the command-line or the web interface:
From within INVOKEAI_ROOT, activate the environment
(with `source .venv/bin/activate` or `.venv\Scripts\activate`), and then run
the script `invokeai`. If the virtual environment you selected is NOT inside
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
`--root_dir \path\to\invokeai` to the commands below:
!!! example ""
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
=== "CLI"
```bash
invokeai
```
=== "local Webserver"
```bash
invokeai --web
```
=== "Public Webserver"
```bash
invokeai --web --host 0.0.0.0
```
If you choose to run the web interface, point your browser at
@@ -236,99 +155,23 @@ manager, please follow these steps:
!!! tip
You can permanently set the location of the runtime directory
by setting the environment variable `INVOKEAI_ROOT` to the
path of the directory. As mentioned previously, this is
**highly recommended** if your virtual environment is located outside of
your runtime directory.
10. Render away!
Browse the [features](../features/CLI.md) section to learn about all the
things you can do with InvokeAI.
Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm-up period
rendering will be fast.
11. Subsequently, to relaunch the script, activate the virtual environment, and
then launch the `invokeai` command. If you forget to activate the virtual
environment you will most likely receive a `command not found` error.
!!! warning
Do not move the runtime directory after installation. The virtual environment will get confused if the directory is moved.
12. Other scripts
The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be launched with the command:
```bash
invokeai-ti --gui
```
Similarly, the [Model Merging](../features/MODEL_MERGING.md) script can be launched with the command:
```bash
invokeai-merge --gui
```
Leave off the `--gui` option to run the script using command-line arguments. Pass the `--help` argument
to get usage instructions.
### Developer Install
If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
code for InvokeAI. For this to work, you will need to install the
`git` source code management program. If it is not already installed
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git).
1. From the command line, run this command:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.
2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
3. Enter the InvokeAI repository directory and run one of these
commands, based on your GPU:
=== "CUDA (NVidia)"
```bash
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
Be sure to pass `-e` (for an editable install) and don't forget the
dot ("."). It is part of the command.
You can now run `invokeai` and its related commands. The code will be
read from the repository, so that you can edit the .py source files
and watch the code's behavior change.
4. If you wish to contribute to the InvokeAI project, you are
encouraged to establish a GitHub account and "fork"
https://github.com/invoke-ai/InvokeAI into your own copy of the
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.
Please see [Contributing](/index.md#Contributing) for hints
on getting started.

View File

@@ -1,125 +0,0 @@
---
title: NVIDIA Cuda / AMD ROCm
---
<figure markdown>
# :simple-nvidia: CUDA | :simple-amd: ROCm
</figure>
In order for InvokeAI to run at full speed, you will need a graphics
card with a supported GPU. InvokeAI supports NVidia cards via the CUDA
driver on Windows and Linux, and AMD cards via the ROCm driver on Linux.
## :simple-nvidia: CUDA
### Linux and Windows Install
If you have used your system for other graphics-intensive tasks, such
as gaming, you may very well already have the CUDA drivers
installed. To confirm, open up a command-line window and type:
```
nvidia-smi
```
If this command produces a status report on the GPU(s) installed on
your system, CUDA is installed and you have no more work to do. If
instead you get "command not found", or similar, then the driver will
need to be installed.
We strongly recommend that you install the CUDA Toolkit package
directly from NVIDIA. **Do not try to install Ubuntu's
nvidia-cuda-toolkit package. It is out of date and will cause
conflicts among the NVIDIA driver and binaries.**
Go to [CUDA Toolkit 11.7
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
and use the target selection wizard to choose your operating system,
hardware platform, and preferred installation method (e.g. "local"
versus "network").
This will provide you with a downloadable install file or, depending
on your choices, a recipe for downloading and running an install shell
script. Be sure to read and follow the full installation instructions.
After an install that seems successful, you can confirm by again
running `nvidia-smi` from the command line.
### Linux Install with a Runtime Container
On Linux systems, an alternative to installing CUDA Toolkit directly on
your system is to run an NVIDIA software container that has the CUDA
libraries already in place. This is recommended if you are already
familiar with containerization technologies such as Docker.
For downloads and instructions, visit the [NVIDIA CUDA Container
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/cu117` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
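For example, a manual CUDA-enabled torch install might look like this (a sketch; pip chooses the package versions, and the index URL is the part that matters):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
```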
## :simple-amd: ROCm
### Linux Install
AMD GPUs are only supported on Linux platforms due to the lack of a
Windows ROCm driver at the current time. Also be aware that support
for newer AMD GPUs is spotty. Your mileage may vary.
It is possible that the ROCm driver is already installed on your
machine. To test, open up a terminal window and issue the following
command:
```
rocm-smi
```
If you get a table labeled "ROCm System Management Interface" the
driver is installed and you are done. If you get "command not found,"
then the driver needs to be installed.
Go to AMD's [ROCm Downloads
Guide](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation_new.html#installation-methods)
and scroll to the _Installation Methods_ section. Find the subsection
for the install method for your preferred Linux distribution, and
issue the commands given in the recipe.
Annoyingly, the official AMD site does not have a recipe for the most
recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.
After installation, please run `rocm-smi` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
do a reboot in order to load the driver.
### Linux Install with a ROCm-docker Container
If you are comfortable with the Docker containerization system, then
you can build a ROCm docker file. The source code and installation
recipes are available
[Here](https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
This will be done automatically for you if you use the installer
script.
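By analogy with the CUDA case, a manual ROCm-enabled torch install might look like this (again a sketch; the index URL is the key argument):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
```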
Be aware that the torch machine learning library does not seamlessly
interoperate with all AMD GPUs and you may experience garbled images,
black images, or long startup delays before rendering commences. Most
of these issues can be solved by Googling for workarounds. If you have
a problem and find a solution, please post an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) so that other
users benefit and we can update this document.

View File

@@ -16,6 +16,10 @@ title: Installing with Docker
For general use, install locally to leverage your machine's GPU.
!!! tip "For running on a cloud instance/service"
Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
## Why containers?
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
@@ -74,40 +78,38 @@ Some Suggestions of variables you may want to change besides the Token:
<figure markdown>
| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
| ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag |
| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository |
| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to built, available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. |
| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used |
| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development |
| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url |
</figure>
#### Build the Image
I provided a build script, which is located next to the Dockerfile in
`docker/build.sh`. It can be executed from repository root like this:
```bash
./docker/build.sh
```
The build script not only builds the container, but also creates the docker
volume if not existing yet.
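If the defaults in the table above don't fit your setup, the variables can be overridden when invoking the build script (a sketch, assuming the script reads them from the environment as the table implies):

```bash
# build a ROCm-flavored image instead of the default CUDA one
CONTAINER_FLAVOR=rocm ./docker/build.sh
```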
#### Run the Container
After the build process is done, you can run the container via the provided
`docker/run.sh` script
```bash
./docker/run.sh
```
When used without arguments, the container will start the webserver and provide
@@ -117,7 +119,7 @@ also do so.
!!! example "run script example"
```bash
./docker/run.sh "banana sushi" -Ak_lms -S42 -s10
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
@@ -128,18 +130,16 @@ also do so.
## Running the container on your GPU
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running
the container with an extra environment variable to enable GPU usage and have
the process run much faster:
```bash
GPU_FLAGS=all ./docker/run.sh
```
This passes the `--gpus all` to docker and uses the GPU.
If you don't have a GPU (or your host is not yet setup to use it) you will see a
message like this:
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
@@ -147,8 +147,84 @@ You can use the full set of GPU combinations documented here:
https://docs.docker.com/config/containers/resource_constraints/#gpu
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to
choose a specific device identified by a UUID.
## Running InvokeAI in the cloud with Docker
We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
An advantage of this method is that it does not need any local setup or additional dependencies.
See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
### Prerequisites
- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essential` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
### Building and running the image locally
1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
- `make configure` (This does *not* require a GPU-capable system)
- this will create a local cache of models and configs (a.k.a the _runtime dir_)
- enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
#### Building and running without `make`
(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
!!! example "Build the image and configure the runtime directory"
```Shell
cd docker-build
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
```
!!! example "Run the web server"
```Shell
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
```
Access the Web UI at http://localhost:9090
!!! example "Run the InvokeAI interactive CLI"
```
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
```
### Running the image in the cloud
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
1. use either one of the `docker run` commands above, substituting the image name for your own image.
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
The template's `README` provides ample detail, but at a high level, the process is as follows:
1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
---
@@ -164,12 +240,13 @@ choose a specific device identified by a UUID.
If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.
If you're **directly on macOS follow these startup instructions**. With the
Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`: Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work. By default the images are saved
in `outputs/img-samples/`.
```Shell
python3 scripts/invoke.py --full_precision
@@ -185,9 +262,9 @@ invoke> q
### Text to Image
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly. Then increase
steps to 100 or more for good (but slower) results. The prompt can be in quotes
or not.
```Shell
invoke> The hulk fighting with sheldon cooper -s5 -n1
@@ -200,9 +277,10 @@ You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.
If you're on a container the output is set to the Docker volume. You can copy it
wherever you want. You can download it from the Docker Desktop app, Volumes,
my-vol, data. Or you can copy it from your Mac terminal. Keep in mind
`docker cp` can't expand `*.png` so you'll need to specify the image file name.
On your host Mac (you can use the name of any container that mounted the
volume):

View File

@@ -4,347 +4,249 @@ title: Installing Models
# :octicons-paintbrush-16: Installing Models
## Checkpoint and Diffusers Models
The model checkpoint files ('\*.ckpt') are the Stable Diffusion
"secret sauce". They are the product of training the AI on millions of
captioned images gathered from multiple sources.
The model weight files ('\*.ckpt') are the Stable Diffusion "secret sauce". They
are the product of training the AI on millions of captioned images gathered from
multiple sources.
Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are dozens or more
that have been fine-tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format
which prevents malware from masquerading as a model, and `diffusers`
models, the most recent innovation.
InvokeAI supports all three formats but strongly prefers the
`diffusers` format. These are distributed as directories containing
multiple subfolders, each of which contains a different aspect of the
model. The advantage of this is that the models load from disk really
fast. Another advantage is that `diffusers` models are supported by a
large and active set of open source developers working at and with the
HuggingFace organization, and improvements in both rendering quality
and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.
While InvokeAI will continue to support `.ckpt` and `.safetensors`
models for the near future, these are deprecated and support will
likely be withdrawn at some point in the not-too-distant future.
This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
into performant `diffusers` models.
## Base Models

InvokeAI comes with support for a good initial set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that are currently installed are recorded in the model
configuration file `configs/models.yaml`. The current list is:
| Model | HuggingFace Repo ID | Description | URL |
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
| stable-diffusion-1.5 | runwayml/stable-diffusion-v1-5 | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | runwayml/stable-diffusion-v1-4 | Previous version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-4 |
| inpainting-1.5 | runwayml/stable-diffusion-inpainting | Stable Diffusion 1.5 optimized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| stable-diffusion-2.1-base | stabilityai/stable-diffusion-2-1-base | Stable Diffusion version 2.1 trained on 512 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1-base |
| stable-diffusion-2.1-768 | stabilityai/stable-diffusion-2-1 | Stable Diffusion version 2.1 trained on 768 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1 |
| dreamlike-diffusion-1.0 | dreamlike-art/dreamlike-diffusion-1.0 | An SD 1.5 model finetuned on high quality art | https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0 |
| dreamlike-photoreal-2.0 | dreamlike-art/dreamlike-photoreal-2.0 | A photorealistic model trained on 768 pixel images | https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
| openjourney-4.0 | prompthero/openjourney | An SD 1.5 model finetuned on Midjourney images; prompt with "mdjrny-v4 style" | https://huggingface.co/prompthero/openjourney |
| nitro-diffusion-1.0 | nitrosocke/Nitro-Diffusion | An SD 1.5 model finetuned on three styles; prompt with "archer style", "arcane style" or "modern disney style" | https://huggingface.co/nitrosocke/Nitro-Diffusion |
| trinart-2.0 | naclbit/trinart_stable_diffusion_v2 | An SD 1.5 model finetuned with ~40,000 assorted high resolution manga/anime-style pictures | https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
| trinart-characters-2_0 | naclbit/trinart_derrida_characters_v2_stable_diffusion | An SD 1.5 model finetuned with 19.2M manga/anime-style pictures | https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion |

Several of the same models, along with the waifu-diffusion anime model and a
face-improving VAE add-on, are also distributed as standalone legacy weight
files:

| Model | Weight File | Description | DOWNLOAD FROM |
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | sd-v1-4.ckpt | Previous version of base Stable Diffusion model | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original |
| inpainting-1.5 | sd-v1-5-inpainting.ckpt | Stable Diffusion 1.5 model specialized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| waifu-diffusion-1.3 | model-epoch09-float32.ckpt | Stable Diffusion 1.4 trained to produce anime images | https://huggingface.co/hakurei/waifu-diffusion-v1-3 |
| `<all models>` | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tune add-on file that improves face generation | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/ |
Note that these files are covered by an "Ethical AI" license which forbids
certain uses. You will need to create an account on the Hugging Face website
and accept the license terms before you can download them.
The predefined configuration file for InvokeAI (located at
`configs/models.yaml`) provides entries for each of these weights files.
`stable-diffusion-1.5` is the default model used, and we strongly recommend that
you install this weights file if nothing else.
## Community-Contributed Models

There are too many to list here and more are being contributed every
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
is a great resource for `diffusers` models, and is also the home of a
[fast-growing repository](https://huggingface.co/sd-concepts-library)
of embedding (".bin") models that add subjects and/or styles to your
images. The latter are automatically installed on the fly when you
include the text `<concept-name>` in your prompt; they can also be
loaded explicitly by passing the `--embedding_path` option to the
`invoke.py` command. See [Concepts
Library](../features/CONCEPTS.md) for more information.
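For illustration, a hedged sketch of both mechanisms; the concept name and the file path below are placeholders, not recommendations:

```bash
# Reference a Concepts Library embedding by its trigger term; it is fetched on first use
invoke> a watercolor castle in <some-concept> style

# Or load a manually downloaded embedding file at startup
python scripts/invoke.py --embedding_path /path/to/learned_embeds.bin
```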
Another popular site for community-contributed models is
[CIVITAI](https://civitai.com). This extensive site currently supports
only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.
[This page](https://rentry.org/sdmodels) hosts a large list of official and
unofficial Stable Diffusion models and where they can be obtained.
## Installation

There are multiple ways to install and manage models:

1. During InvokeAI installation (or at any time afterwards), the
`invokeai-configure` script can download and install them for you.

2. The command-line tool (CLI) has commands that allow you to import,
configure and modify model files.

3. The web interface (WebUI) has a GUI for importing and managing
models.

4. You can download the files manually and add the appropriate entries to
`models.yaml`.
### Installation via `invokeai-configure`

This is the most automatic way. From the `invoke` launcher, choose option (6),
"re-run the configure script to download new models." This launches the same
script that prompted you to select models at install time, and you can use it
to add models that you skipped the first time around. It is all right to
specify a model that was previously downloaded; the script will just confirm
that the files are complete.

Alternatively, run `invokeai-configure` directly from the console within the
InvokeAI directory. The script will ask you to select which models to download
and will lead you through the steps of setting up a Hugging Face account if
you haven't done so already.
!!! example ""
```text
Loading Python libraries...
** INTRODUCTION **
Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
and other large models that are needed for text to image generation. At any point you may interrupt
this program and resume later.
** WEIGHT SELECTION **
Would you like to download the Stable Diffusion model weights now? [y]
Choose the weight file(s) you wish to download. Before downloading you
will be given the option to view and change your selections.
[1] stable-diffusion-1.5:
The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
Download? [y]
[2] inpainting-1.5:
RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
Download? [y]
[3] stable-diffusion-1.4:
The original Stable Diffusion version 1.4 weight file (4.27 GB)
Download? [n] n
[4] waifu-diffusion-1.3:
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
Download? [n] y
[5] ft-mse-improved-autoencoder-840000:
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
Download? [y] y
The following weight files will be downloaded:
[1] stable-diffusion-1.5*
[2] inpainting-1.5
[4] waifu-diffusion-1.3
[5] ft-mse-improved-autoencoder-840000
*default
Ok to download? [y]
** LICENSE AGREEMENT FOR WEIGHT FILES **
1. To download the Stable Diffusion weight files you need to read and accept the
CreativeML Responsible AI license. If you have not already done so, please
create an account using the "Sign Up" button:
https://huggingface.co
You will need to verify your email address as part of the HuggingFace
registration process.
2. After creating the account, login under your account and accept
the license terms located here:
https://huggingface.co/CompVis/stable-diffusion-v-1-4-original
Press <enter> when you are ready to continue:
...
```
When the script is complete, you will find the downloaded weights files in
`models/ldm/stable-diffusion-v1` and a matching configuration file in
`configs/models.yaml`.
You can run the script again to add any models you didn't select the first time.
Note that as a safety measure the script will _never_ remove a
previously-installed weights file. You will have to do this manually.
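A hedged sketch of manual removal, with an illustrative path (adjust to the model you actually want to delete):

```bash
# Delete the weights file itself...
rm models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
# ...then remove the matching stanza from configs/models.yaml with a text editor,
# or use the CLI's `!del_model` command described below.
```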
### Installation via the CLI

You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.

#### Installing `.ckpt` and `.safetensors` models

1. First download the desired model weights file and place it under
`models/ldm/stable-diffusion-v1/`. You may rename the weights file to
something more memorable if you wish. Record the path of the weights file
(e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`).

2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.

3. At the `invoke>` command-line, enter the command
`!import_model <path to model>`. For example:

```bash
invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
```

!!! tip "Forward Slashes"
    On Windows systems, use forward slashes rather than backslashes
    in your file paths. If you do use backslashes, you must double
    them like this:
    `C:\\Users\\fred\\Downloads\\martians.safetensors`

Alternatively you can directly import the file using its URL:

```bash
invoke> !import_model https://example.org/sd_models/martians.safetensors
```

For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.

When you import a legacy model, the CLI will ask you a few questions
about the model, including what size image it was trained on (usually
512x512), what name and description you wish to use for it, what
configuration file to use for it (usually the default
`v1-inference.yaml`), whether you'd like to make this model the
default at startup time, and whether you would like to install a
custom VAE (variational autoencoder) file for the model. For recent
models, the answer to the VAE question is usually "no," but it won't
hurt to answer "yes".
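If you are unsure whether a URL is directly downloadable, a quick shell check can save a failed import. This is just a sketch reusing the placeholder URL from above:

```bash
# A 200 response means the file is publicly fetchable; 401/403/404 means it is not
curl -I https://example.org/sd_models/martians.safetensors
```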
#### Installing `diffusers` models
You can install a `diffusers` model from the HuggingFace site using
`!import_model` and the HuggingFace repo_id for the model:
```bash
invoke> !import_model andite/anything-v4.0
```
Alternatively, you can download the model to disk and import it from
there. The model may be distributed as a ZIP file, or as a Git
repository:
```bash
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
```
!!! tip "The CLI supports file path autocompletion"
Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.
!!! tip "On Windows, you can drag model files onto the command-line"
Once you have typed in `!import_model `, you can drag the
model file or directory onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste. However, you will need to reverse or
double backslashes as noted above.
!!! tip "on Windows, you can drag model files onto the command-line"
Before installing, the CLI will ask you for a short name and
description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".
Once you have typed in `!import_model `, you can drag the model `.ckpt` file
onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste.
Follow the wizard's instructions to complete installation, as shown in the
example here:

!!! example ""

    ```text
    invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
    >> Model import in process. Please enter the values needed to configure this model:

    Name for this model: arabian-nights
    Description of this model: Arabian Nights Fine Tune v1.0
    Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
    Default image width: 512
    Default image height: 512
    >> New configuration:
    arabian-nights:
      config: configs/stable-diffusion/v1-inference.yaml
      description: Arabian Nights Fine Tune v1.0
      height: 512
      weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
      width: 512
    OK to import [n]? y
    >> Caching model stable-diffusion-1.4 in system RAM
    >> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
       | LatentDiffusion: Running in eps-prediction mode
       | DiffusionWrapper has 859.52 M params.
       | Making attention of type 'vanilla' with 512 in_channels
       | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
       | Making attention of type 'vanilla' with 512 in_channels
       | Using faster float16 precision
    ```

If you've previously installed the fine-tune VAE file
`vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you want to
add this VAE to the model.

The appropriate entry for this model will be added to `configs/models.yaml` and
it will be available to use in the CLI immediately.

### Converting legacy models into `diffusers`

The CLI command `!convert_model` will convert a `.safetensors` or `.ckpt`
model file into `diffusers` format and install it. This will enable the model
to load and run faster without loss of image quality.

The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:

```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```

After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.

### Optimizing a previously-installed model

Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file by using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:

```bash
invoke> !optimize_model martians-v1.0
```

The model will be converted into `diffusers` format and will replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.
### Related CLI Commands

There are a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:

* `!models` - List all installed models, along with their load status
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model

The two most frequently used are `!models`, which prints a table of the
models that InvokeAI knows about together with their load status, and
`!switch <name of model>`, which loads the requested model and lets you
switch back and forth quickly among loaded models.
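A short illustrative session (the model names and statuses shown are hypothetical, and the exact table layout may differ):

```bash
invoke> !models
stable-diffusion-1.5        active   Most recent version of base Stable Diffusion model
arabian-nights-1.0          cached   Arabian Nights Fine Tune v1.0
invoke> !switch arabian-nights-1.0
>> Loading arabian-nights-1.0...
```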
### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.
You will need to download the desired `.ckpt` or `.safetensors` file and
place it somewhere on your machine's filesystem, for example in
`models/ldm/stable-diffusion-v1` as described in step #1 of the previous
section, and record its path (e.g.
`models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`). Alternatively, for
a `diffusers` model, record the repo_id or download the whole model
directory. Then, using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml` and add a new stanza
that follows this model:

#### A legacy model

A legacy `.ckpt` or `.safetensors` entry will look like this:
```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  format: ckpt
  width: 512
  height: 512
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  default: false
```
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
#### A diffusers model
A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:
```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  default: true
```
And for a downloaded directory:
```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  path: /path/to/captahab-arabian-nights-1.1
  format: diffusers
  default: true
```
There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
After you save the modified `models.yaml` file, relaunch
`invokeai`. The new model will now be available for your use.
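Before relaunching, it can be worth checking that your edit left the file as valid YAML. A minimal sketch, assuming Python with the PyYAML package is available in your environment:

```bash
python -c "import yaml; yaml.safe_load(open('configs/models.yaml')); print('models.yaml parses cleanly')"
```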
### Installation via the WebUI
To access the WebUI Model Manager, click on the button that looks like
a cube in the upper right side of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:
<figure markdown>
![model-manager](../assets/installing-models/webui-models-1.png)
</figure>
To add a new model, click on **+ Add New** and select either a
checkpoint/safetensors model or a diffusers model:
<figure markdown>
![model-manager-add-new](../assets/installing-models/webui-models-2.png)
</figure>
In this example, we chose **Add Diffusers**. As shown in the figure
below, a new dialogue prompts you to enter the name to use for the
model, its description, and either the location of the `diffusers`
model on disk, or its Repo ID on the HuggingFace web site. If you
choose to enter a path to disk, the system will autocomplete for you
as you type:
<figure markdown>
![model-manager-add-diffusers](../assets/installing-models/webui-models-3.png)
</figure>
Press **Add Model** at the bottom of the dialogue (scrolled out of
sight in the figure), and the model will be downloaded, imported, and
registered in `models.yaml`.
The **Add Checkpoint/Safetensor Model** option is similar, except that
in this case you can choose to scan an entire folder for
checkpoint/safetensors files to import. Simply type in the path of the
directory and press the "Search" icon. This will display the
`.ckpt` and `.safetensors` files found inside the directory and its
subfolders, and allow you to choose which ones to import:
<figure markdown>
![model-manager-add-checkpoint](../assets/installing-models/webui-models-4.png)
</figure>
## Model Management Startup Options
The `invoke` launcher and the `invokeai` script accept a series of
command-line arguments that modify InvokeAI's behavior when loading
models. These can be provided on the command line, or added to the
InvokeAI root directory's `invokeai.init` initialization file.
The arguments are:
* `--model <model name>` -- Start up with the indicated model loaded
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.
Here is an example of providing an argument on the command line using
the `invoke.sh` launch script:
```bash
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
```
And here is what the same argument looks like in `invokeai.init`:
```
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
| name | description |
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `invokeai-configure` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
Save the `models.yaml` and relaunch InvokeAI. The new model should now be
available for your use.

View File

@@ -3,19 +3,7 @@ title: Overview
---
We offer several ways to install InvokeAI, each one suited to your
experience and preferences. We suggest that everyone start by
reviewing the
[hardware](010_INSTALL_AUTOMATED.md#hardware_requirements) and
[software](010_INSTALL_AUTOMATED.md#software_requirements)
requirements, as they are the same across each install method. Then
pick the install method most suitable to your level of experience and
needs.
See the [troubleshooting
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
install guide for frequently-encountered installation issues.
## Main Application
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
@@ -30,8 +18,8 @@ install guide for frequently-encountered installation issues.
InvokeAI and its dependencies. We offer two recipes: one suited to
those who prefer the `conda` tool, and one suited to those who prefer
`pip` and Python virtual environments. In our hands the pip install
is faster and more reliable, but your mileage may vary.

Note that the conda installation method is currently deprecated and
will no longer be supported at some point in the future.
This method is recommended for users who have previously used `conda`
@@ -45,10 +33,3 @@ install guide for frequently-encountered installation issues.
InvokeAI and its dependencies. This method is recommended for
individuals who have experience with Docker containers and understand
the pluses and minuses of a container-based install.
## Quick Guides
* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
* [Installing XFormers](./070_INSTALL_XFORMERS.md)
* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
* [Installing New Models](./050_INSTALLING_MODELS.md)

View File

@@ -14,13 +14,12 @@ fi
VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
PATCH=""
VERSION="v${VERSION}${PATCH}"
LATEST_TAG="v2.3-latest"
echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."
read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
read -e -p "Commit and tag this repo with ${VERSION} and 'latest-2.3'? [n]: " input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then
git commit -a
@@ -29,9 +28,8 @@ if [ "$RESPONSE" == 'y' ]; then
echo "Existing/invalid tag"
exit -1
fi
git push origin :refs/tags/$LATEST_TAG
git tag -fa $LATEST_TAG
git push origin :refs/tags/latest-2.3
git tag -fa latest-2.3
fi
# ----------------------
@@ -56,12 +54,12 @@ rm -rf InvokeAI-Installer
# copy content
mkdir InvokeAI-Installer
for f in templates lib *.txt *.reg; do
for f in templates *.py *.txt *.reg; do
cp -r ${f} InvokeAI-Installer/
done
# Move the wheel
mv dist/*.whl InvokeAI-Installer/lib/
mv dist/*.whl InvokeAI-Installer/
# Install scripts
# Mac/Linux
@@ -75,6 +73,17 @@ cp WinLongPathsEnabled.reg InvokeAI-Installer/
# Zip everything up
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer
# Updater
mkdir tmp
cp templates/update.sh.in tmp/update.sh
cp templates/update.bat.in tmp/update.bat
chmod +x tmp/update.sh
chmod +x tmp/update.bat
cd tmp
zip InvokeAI-updater-$VERSION.zip update.sh update.bat
cd ..
mv tmp/InvokeAI-updater-$VERSION.zip .
# clean up
rm -rf InvokeAI-Installer tmp dist

View File

@@ -66,7 +66,8 @@ del /q .tmp1 .tmp2
@rem -------------- Install and Configure ---------------
call python .\lib\main.py
call python main.py
@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers

installer/install.sh.in Executable file → Normal file
View File

@@ -3,28 +3,5 @@
# make sure we are not already in a venv
# (don't need to check status)
deactivate >/dev/null 2>&1
scriptdir=$(dirname "$0")
cd $scriptdir
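# version(): zero-pad each dotted component (e.g. 3.9.1 -> 3009001000) so version strings compare numerically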
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
MINIMUM_PYTHON_VERSION=3.9.0
PYTHON=""
for candidate in python3.10 python3.9 python3 python python3.11 ; do
if ppath=`which $candidate`; then
python_version=$($ppath -V | awk '{ print $2 }')
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
PYTHON=$ppath
break
fi
fi
done
if [ -z "$PYTHON" ]; then
echo "A suitable Python interpreter could not be found"
echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
read -p "Press any key to exit"
exit -1
fi
exec $PYTHON ./lib/main.py ${@}
exec python3 $(dirname $0)/main.py ${@}

View File

@@ -38,8 +38,7 @@ class Installer:
self.reqs = INSTALLER_REQS
self.preflight()
if os.getenv("VIRTUAL_ENV") is not None:
print("A virtual environment is already activated. Please 'deactivate' before installation.")
sys.exit(-1)
raise NotImplementedError("A virtual environment is already activated. Please 'deactivate' before installation.")
self.bootstrap()
def preflight(self) -> None:
@@ -249,7 +248,6 @@ class InvokeAiInstance:
"--require-virtualenv",
"torch",
"torchvision",
"--force-reinstall",
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
@@ -285,7 +283,7 @@ class InvokeAiInstance:
if FF_USE_LOCAL_WHEEL:
# if no wheel, try to do a source install before giving up
try:
src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
src = str(next(Path.cwd().glob("InvokeAI-*.whl")))
except StopIteration:
try:
src = Path(__file__).parents[1].expanduser().resolve()
@@ -326,7 +324,6 @@ class InvokeAiInstance:
Configure the InvokeAI runtime directory
"""
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1,len(sys.argv)):
el = sys.argv[i]
@@ -346,6 +343,9 @@ class InvokeAiInstance:
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script.
# this may change in the future with config refactoring!
# set sys.argv to a consistent state
invokeai_configure.main()
def install_user_scripts(self):
@@ -359,7 +359,7 @@ class InvokeAiInstance:
scripts = ['invoke']
for script in scripts:
src = Path(__file__).parent / '..' / "templates" / f"{script}.{ext}.in"
src = Path(__file__).parent / "templates" / f"{script}.{ext}.in"
dest = self.runtime / f"{script}.{ext}"
shutil.copy(src, dest)
os.chmod(dest, 0o0755)
@@ -446,7 +446,6 @@ def get_torch_source() -> (Union[str, None],str):
url = "https://download.pytorch.org/whl/cpu"
if device == 'cuda':
url = 'https://download.pytorch.org/whl/cu117'
optional_modules = '[xformers]'
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13

View File

@@ -9,9 +9,10 @@ from pathlib import Path
from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group, group
from rich.console import Console, Group
from rich.panel import Panel
from rich.prompt import Confirm
from rich.style import Style
@@ -36,21 +37,17 @@ else:
def welcome():
@group()
def text():
if (platform_specific := _platform_specific_help()) != "":
yield platform_specific
yield ""
yield Text.from_markup("Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", justify="center")
console.rule()
print(
Panel(
title="[bold wheat1]Welcome to the InvokeAI Installer",
renderable=text(),
renderable=Text(
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry.",
justify="center",
),
box=box.DOUBLE,
expand=True,
width=80,
expand=False,
padding=(1, 2),
style=Style(bgcolor="grey23", color="orange1"),
subtitle=f"[bold grey39]{OS}-{ARCH}",
@@ -203,7 +200,7 @@ def graphical_accelerator():
[
f"Detected the [gold1]{OS}-{ARCH}[/] platform",
"",
"See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"See [steel_blue3]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"",
"[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
]
@@ -297,16 +294,3 @@ def introduction() -> None:
)
)
console.line(2)
def _platform_specific_help()->str:
if OS == "Darwin":
text = Text.from_markup("""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""")
elif OS == "Windows":
text = Text.from_markup("""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
enable long path support on your system.
2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""")
else:
text = ""
return text

View File

@@ -13,8 +13,7 @@ echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
echo 5. re-run the configure script to download new models
echo 6. open the developer console
echo 7. command-line help
set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
set /P restore="Please enter 1, 2, 3, 4 or 5: [5] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line..
@@ -43,11 +42,6 @@ IF /I "%restore%" == "1" (
echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k
) ELSE IF /I "%restore%" == "7" (
echo Displaying command line help...
python .venv\Scripts\invokeai.exe --help %*
pause
exit /b
) ELSE (
echo Invalid selection
pause

View File

@@ -47,11 +47,11 @@ if [ "$0" != "bash" ]; then
;;
3)
echo "Starting Textual Inversion:"
exec invokeai-ti --gui $@
exec textual_inversion --gui $@
;;
4)
echo "Merging Models:"
exec invokeai-merge --gui $@
exec merge_models --gui $@
;;
5)
echo "Developer Console:"

View File

@@ -626,10 +626,9 @@ class InvokeAIWebServer:
printable_parameters["init_mask"][:64] + "..."
)
print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
print(f'>> ESRGAN Parameters: {esrgan_parameters}')
print(f'>> Facetool Parameters: {facetool_parameters}')
print(
f">> Image generation requested: {printable_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}"
)
self.generate_images(
generation_parameters,
esrgan_parameters,
@@ -1155,7 +1154,7 @@ class InvokeAIWebServer:
image, os.path.basename(path), self.thumbnail_image_path
)
print(f'\n\n>> Image generated: "{path}"\n')
print(f'>> Image generated: "{path}"')
self.write_log_message(f'[Generated] "{path}": {command}')
if progress.total_iterations > progress.current_iteration:
@@ -1194,6 +1193,8 @@ class InvokeAIWebServer:
progress.set_current_iteration(progress.current_iteration + 1)
print(generation_parameters)
def diffusers_step_callback_adapter(*cb_args, **kwargs):
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
@@ -1208,18 +1209,12 @@ class InvokeAIWebServer:
)
except KeyboardInterrupt:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
self.socketio.emit("processingCanceled")
raise
except CanceledException:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
self.socketio.emit("processingCanceled")
pass
except Exception as e:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
print(e)
self.socketio.emit("error", {"message": (str(e))})
print("\n")
@@ -1227,12 +1222,6 @@ class InvokeAIWebServer:
traceback.print_exc()
print("\n")
def empty_cuda_cache(self):
if self.generate.device.type == "cuda":
import torch.cuda
torch.cuda.empty_cache()
def parameters_to_generated_image_metadata(self, parameters):
try:
# top-level metadata minus `image` or `images`
@@ -1316,6 +1305,8 @@ class InvokeAIWebServer:
rfc_dict["variations"] = variations
print(parameters)
if rfc_dict["type"] == "img2img":
rfc_dict["strength"] = parameters["strength"]
rfc_dict["fit"] = parameters["fit"] # TODO: Noncompliant

View File

@@ -1,5 +1,5 @@
stable-diffusion-1.5:
description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
description: Stable Diffusion version 1.5 weight file (4.27 GB)
repo_id: runwayml/stable-diffusion-v1-5
format: diffusers
vae:
@@ -7,14 +7,14 @@ stable-diffusion-1.5:
recommended: True
default: True
inpainting-1.5:
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
repo_id: runwayml/stable-diffusion-inpainting
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: True
dreamlike-diffusion-1.0:
description: An SD 1.5 model fine tuned on high quality art by dreamlike.art, diffusers version (2.13 GB)
description: An SD 1.5 model fine tuned on high quality art by dreamlike.art
format: diffusers
repo_id: dreamlike-art/dreamlike-diffusion-1.0
vae:
@@ -49,8 +49,9 @@ nitro-diffusion-1.0:
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
trinart-2.0:
description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures, diffusers version (2.13 GB)
description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB)
repo_id: naclbit/trinart_stable_diffusion_v2
format: diffusers
vae:

View File

@@ -1,13 +0,0 @@
{
"plugins": [
[
"transform-imports",
{
"lodash": {
"transform": "lodash/${member}",
"preventFullImport": true
}
}
]
]
}

View File

@@ -1,5 +0,0 @@
dist/
.husky/
node_modules/
patches/
public/

View File

@@ -0,0 +1,13 @@
module.exports = {
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'plugin:react-hooks/recommended',
],
parser: '@typescript-eslint/parser',
plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
root: true,
rules: {
'@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
},
};

View File

@@ -1,40 +0,0 @@
module.exports = {
env: {
browser: true,
es6: true,
node: true,
},
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'plugin:react/recommended',
'plugin:react-hooks/recommended',
'plugin:prettier/recommended',
'plugin:react/jsx-runtime',
],
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaFeatures: {
jsx: true,
},
ecmaVersion: 2018,
sourceType: 'module',
},
plugins: ['react', '@typescript-eslint', 'eslint-plugin-react-hooks'],
root: true,
rules: {
'react-hooks/exhaustive-deps': 'error',
'no-var': 'error',
'brace-style': 'error',
'prefer-template': 'error',
radix: 'error',
'space-before-blocks': 'error',
'import/prefer-default-export': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
},
settings: {
react: {
version: 'detect',
},
},
};

View File

@@ -23,6 +23,3 @@ dist-ssr
*.njsproj
*.sln
*.sw?
# build stats
stats.html

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
cd invokeai/frontend/ && npx run lint

View File

@@ -1,5 +0,0 @@
dist/
.husky/
node_modules/
patches/
public/

View File

@@ -1,6 +0,0 @@
module.exports = {
trailingComma: 'es5',
tabWidth: 2,
semi: true,
singleQuote: true,
};

View File

@@ -1,20 +1,28 @@
# InvokeAI UI dev setup

The UI is in `invokeai/frontend`.

## Environment set up

Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
[yarn](https://yarnpkg.com/getting-started/install).

From `invokeai/frontend/` run `npm install` / `yarn install` to install the
frontend packages and get everything set up.

## Run

- `python scripts/dream.py --web` serves both frontend and backend at
  http://localhost:9090

## Dev

1. From `invokeai/frontend/`, run `npm run dev` / `yarn dev` to start the dev server.
2. Start the InvokeAI UI per usual: `invokeai --web` (or `python scripts/dream.py --web`).
3. Point your browser to the dev server address, e.g. `http://localhost:5173/`.

To build for dev: `npm run build-dev` / `yarn build-dev`

To build for production: `npm run build` / `yarn build`

## TODO

- Search repo for "TODO"

View File

(binary image changed; 116 KiB before and after)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

(binary image changed; 43 KiB before and after)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,16 +1,23 @@
<!DOCTYPE html>
<html lang="en">
<head>
<script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-8606d352.js"></script>
<link rel="stylesheet" href="./assets/index-b0bf79f4.css">
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.2a9ecaa6.js"></script>
<link rel="stylesheet" href="./assets/index.8badc8b4.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>
<body>
<div id="root"></div>
<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8f3bb5f8.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>

View File

@@ -21,7 +21,6 @@
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"langDutch": "Dutch",
"langUkranian": "Ukranian",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",

View File

@@ -21,7 +21,6 @@
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"langDutch": "Dutch",
"langUkranian": "Ukranian",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",

View File

@@ -1,62 +1 @@
{
"hotkeysLabel": "Raccourcis clavier",
"themeLabel": "Thème",
"languagePickerLabel": "Sélecteur de langue",
"reportBugLabel": "Signaler un bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Paramètres",
"darkTheme": "Sombre",
"lightTheme": "Clair",
"greenTheme": "Vert",
"langEnglish": "Anglais",
"langRussian": "Russe",
"langItalian": "Italien",
"langBrPortuguese": "Portugais (Brésilien)",
"langGerman": "Allemand",
"langPortuguese": "Portugais",
"langFrench": "Français",
"langPolish": "Polonais",
"langSimplifiedChinese": "Chinois simplifié",
"langSpanish": "Espagnol",
"langJapanese": "Japonais",
"langDutch": "Néerlandais",
"text2img": "Texte en image",
"img2img": "Image en image",
"unifiedCanvas": "Canvas unifié",
"nodes": "Nœuds",
"nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.",
"postProcessing": "Post-traitement",
"postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu Options avancées des onglets Texte en image et Image en image. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image ci-dessus l'affichage d'image actuel ou dans le visualiseur.",
"postProcessDesc2": "Une interface utilisateur dédiée sera bientôt disponible pour faciliter les workflows de post-traitement plus avancés.",
"postProcessDesc3": "L'interface en ligne de commande d'Invoke AI offre diverses autres fonctionnalités, notamment Embiggen.",
"training": "Formation",
"trainingDesc1": "Un workflow dédié pour former vos propres embeddings et checkpoints en utilisant Textual Inversion et Dreambooth depuis l'interface web.",
"trainingDesc2": "InvokeAI prend déjà en charge la formation d'embeddings personnalisés en utilisant Textual Inversion en utilisant le script principal.",
"upload": "Télécharger",
"close": "Fermer",
"load": "Charger",
"back": "Retour",
"statusConnected": "Connecté",
"statusDisconnected": "Déconnecté",
"statusError": "Erreur",
"statusPreparing": "Préparation",
"statusProcessingCanceled": "Traitement Annulé",
"statusProcessingComplete": "Traitement Terminé",
"statusGenerating": "Génération",
"statusGeneratingTextToImage": "Génération Texte vers Image",
"statusGeneratingImageToImage": "Génération Image vers Image",
"statusGeneratingInpainting": "Génération de Réparation",
"statusGeneratingOutpainting": "Génération de Completion",
"statusGenerationComplete": "Génération Terminée",
"statusIterationComplete": "Itération Terminée",
"statusSavingImage": "Sauvegarde de l'Image",
"statusRestoringFaces": "Restauration des Visages",
"statusRestoringFacesGFPGAN": "Restauration des Visages (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restauration des Visages (CodeFormer)",
"statusUpscaling": "Mise à Échelle",
"statusUpscalingESRGAN": "Mise à Échelle (ESRGAN)",
"statusLoadingModel": "Chargement du Modèle",
"statusModelChanged": "Modèle Changé"
}
{}

View File

@@ -19,8 +19,6 @@
"langPolish": "Polacco",
"langSimplifiedChinese": "Cinese semplificato",
"langSpanish": "Spagnolo",
"langJapanese": "Giapponese",
"langDutch": "Olandese",
"text2img": "Testo a Immagine",
"img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata",
@@ -36,7 +34,6 @@
"upload": "Caricamento",
"close": "Chiudi",
"load": "Carica",
"back": "Indietro",
"statusConnected": "Collegato",
"statusDisconnected": "Disconnesso",
"statusError": "Errore",

View File

@@ -1,53 +0,0 @@
{
"hotkeysLabel": арячi клавіші",
"themeLabel": "Тема",
"languagePickerLabel": "Мова",
"reportBugLabel": "Повідомити про помилку",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Налаштування",
"darkTheme": "Темна",
"lightTheme": "Світла",
"greenTheme": "Зелена",
"langEnglish": "Англійська",
"langRussian": "Російська",
"langItalian": "Iталійська",
"langPortuguese": "Португальська",
"langFrench": "Французька",
"text2img": "Зображення із тексту (text2img)",
"img2img": "Зображення із зображення (img2img)",
"unifiedCanvas": "Універсальне полотно",
"nodes": "Вузли",
"nodesDesc": "Система генерації зображень на основі нодів (вузлів) вже розробляється. Слідкуйте за новинами про цю чудову функцію.",
"postProcessing": "Постобробка",
"postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.",
"postProcessDesc2": "Найближчим часом буде випущено спеціальний інтерфейс для більш сучасних процесів постобробки.",
"postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen",
"training": "Навчання",
"trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth",
"trainingDesc2": "InvokeAI вже підтримує навчання моделей за допомогою TI, через інтерфейс командного рядка.",
"upload": "Завантажити",
"close": "Закрити",
"load": "Завантажити",
"statusConnected": "Підключено",
"statusDisconnected": "Відключено",
"statusError": "Помилка",
"statusPreparing": "Підготування",
"statusProcessingCanceled": "Обробка перервана",
"statusProcessingComplete": "Обробка завершена",
"statusGenerating": "Генерація",
"statusGeneratingTextToImage": "Генерація зображення із тексту",
"statusGeneratingImageToImage": "Генерація зображення із зображення",
"statusGeneratingInpainting": "Домальовка всередині",
"statusGeneratingOutpainting": "Домальовка зовні",
"statusGenerationComplete": "Генерація завершена",
"statusIterationComplete": "Iтерація завершена",
"statusSavingImage": "Збереження зображення",
"statusRestoringFaces": "Відновлення облич",
"statusRestoringFacesGFPGAN": "Відновлення облич (GFPGAN)",
"statusRestoringFacesCodeFormer": "Відновлення облич (CodeFormer)",
"statusUpscaling": "Збільшення",
"statusUpscalingESRGAN": "Збільшення (ESRGAN)",
"statusLoadingModel": "Завантаження моделі",
"statusModelChanged": "Модель змінено"
}

View File

@@ -1,16 +1 @@
{
"generations": "Générations",
"showGenerations": "Afficher les générations",
"uploads": "Téléchargements",
"showUploads": "Afficher les téléchargements",
"galleryImageSize": "Taille de l'image",
"galleryImageResetSize": "Réinitialiser la taille",
"gallerySettings": "Paramètres de la galerie",
"maintainAspectRatio": "Maintenir le rapport d'aspect",
"autoSwitchNewImages": "Basculer automatiquement vers de nouvelles images",
"singleColumnLayout": "Mise en page en colonne unique",
"pinGallery": "Épingler la galerie",
"allImagesLoaded": "Toutes les images chargées",
"loadMore": "Charger plus",
"noImagesInGallery": "Aucune image dans la galerie"
}
{}

View File

@@ -1,16 +0,0 @@
{
"generations": "Генерації",
"showGenerations": "Показувати генерації",
"uploads": "Завантаження",
"showUploads": "Показувати завантаження",
"galleryImageSize": "Розмір зображень",
"galleryImageResetSize": "Аатоматичний розмір",
"gallerySettings": "Налаштування галереї",
"maintainAspectRatio": "Зберігати пропорції",
"autoSwitchNewImages": "Автоматично вибирати нові",
"singleColumnLayout": "Одна колонка",
"pinGallery": "Закріпити галерею",
"allImagesLoaded": "Всі зображення завантажені",
"loadMore": "Завантажити більше",
"noImagesInGallery": "Зображень немає"
}

View File

@@ -1,207 +1 @@
{
"keyboardShortcuts": "Raccourcis clavier",
"appHotkeys": "Raccourcis de l'application",
"GeneralHotkeys": "Raccourcis généraux",
"galleryHotkeys": "Raccourcis de la galerie",
"unifiedCanvasHotkeys": "Raccourcis du Canvas unifié",
"invoke": {
"title": "Invoquer",
"desc": "Générer une image"
},
"cancel": {
"title": "Annuler",
"desc": "Annuler la génération d'image"
},
"focusPrompt": {
"title": "Prompt de Focus",
"desc": "Mettre en focus la zone de saisie de la commande"
},
"toggleOptions": {
"title": "Basculer Options",
"desc": "Ouvrir et fermer le panneau d'options"
},
"pinOptions": {
"title": "Epingler Options",
"desc": "Epingler le panneau d'options"
},
"toggleViewer": {
"title": "Basculer Visionneuse",
"desc": "Ouvrir et fermer la visionneuse d'image"
},
"toggleGallery": {
"title": "Basculer Galerie",
"desc": "Ouvrir et fermer le tiroir de galerie"
},
"maximizeWorkSpace": {
"title": "Maximiser Espace de travail",
"desc": "Fermer les panneaux et maximiser la zone de travail"
},
"changeTabs": {
"title": "Changer d'onglets",
"desc": "Passer à un autre espace de travail"
},
"consoleToggle": {
"title": "Bascule de la console",
"desc": "Ouvrir et fermer la console"
},
"setPrompt": {
"title": "Définir le prompt",
"desc": "Utiliser le prompt de l'image actuelle"
},
"setSeed": {
"title": "Définir la graine",
"desc": "Utiliser la graine de l'image actuelle"
},
"setParameters": {
"title": "Définir les paramètres",
"desc": "Utiliser tous les paramètres de l'image actuelle"
},
"restoreFaces": {
"title": "Restaurer les faces",
"desc": "Restaurer l'image actuelle"
},
"upscale": {
"title": "Agrandir",
"desc": "Agrandir l'image actuelle"
},
"showInfo": {
"title": "Afficher les informations",
"desc": "Afficher les informations de métadonnées de l'image actuelle"
},
"sendToImageToImage": {
"title": "Envoyer à l'image à l'image",
"desc": "Envoyer l'image actuelle à l'image à l'image"
},
"deleteImage": {
"title": "Supprimer l'image",
"desc": "Supprimer l'image actuelle"
},
"closePanels": {
"title": "Fermer les panneaux",
"desc": "Fermer les panneaux ouverts"
},
"previousImage": {
"title": "Image précédente",
"desc": "Afficher l'image précédente dans la galerie"
},
"nextImage": {
"title": "Image suivante",
"desc": "Afficher l'image suivante dans la galerie"
},
"toggleGalleryPin": {
"title": "Activer/désactiver l'épinglage de la galerie",
"desc": "Épingle ou dépingle la galerie à l'interface utilisateur"
},
"increaseGalleryThumbSize": {
"title": "Augmenter la taille des miniatures de la galerie",
"desc": "Augmente la taille des miniatures de la galerie"
},
"decreaseGalleryThumbSize": {
"title": "Diminuer la taille des miniatures de la galerie",
"desc": "Diminue la taille des miniatures de la galerie"
},
"selectBrush": {
"title": "Sélectionner un pinceau",
"desc": "Sélectionne le pinceau de la toile"
},
"selectEraser": {
"title": "Sélectionner un gomme",
"desc": "Sélectionne la gomme de la toile"
},
"decreaseBrushSize": {
"title": "Diminuer la taille du pinceau",
"desc": "Diminue la taille du pinceau/gomme de la toile"
},
"increaseBrushSize": {
"title": "Augmenter la taille du pinceau",
"desc": "Augmente la taille du pinceau/gomme de la toile"
},
"decreaseBrushOpacity": {
"title": "Diminuer l'opacité du pinceau",
"desc": "Diminue l'opacité du pinceau de la toile"
},
"increaseBrushOpacity": {
"title": "Augmenter l'opacité du pinceau",
"desc": "Augmente l'opacité du pinceau de la toile"
},
"moveTool": {
"title": "Outil de déplacement",
"desc": "Permet la navigation sur la toile"
},
"fillBoundingBox": {
"title": "Remplir la boîte englobante",
"desc": "Remplit la boîte englobante avec la couleur du pinceau"
},
"eraseBoundingBox": {
"title": "Effacer la boîte englobante",
"desc": "Efface la zone de la boîte englobante"
},
"colorPicker": {
"title": "Sélectionnez le sélecteur de couleur",
"desc": "Sélectionne le sélecteur de couleur de la toile"
},
"toggleSnap": {
"title": "Basculer Snap",
"desc": "Basculer Snap à la grille"
},
"quickToggleMove": {
"title": "Basculer rapidement déplacer",
"desc": "Basculer temporairement le mode Déplacer"
},
"toggleLayer": {
"title": "Basculer la couche",
"desc": "Basculer la sélection de la couche masque/base"
},
"clearMask": {
"title": "Effacer le masque",
"desc": "Effacer entièrement le masque"
},
"hideMask": {
"title": "Masquer le masque",
"desc": "Masquer et démasquer le masque"
},
"showHideBoundingBox": {
"title": "Afficher/Masquer la boîte englobante",
"desc": "Basculer la visibilité de la boîte englobante"
},
"mergeVisible": {
"title": "Fusionner visible",
"desc": "Fusionner toutes les couches visibles de la toile"
},
"saveToGallery": {
"title": "Enregistrer dans la galerie",
"desc": "Enregistrer la toile actuelle dans la galerie"
},
"copyToClipboard": {
"title": "Copier dans le presse-papiers",
"desc": "Copier la toile actuelle dans le presse-papiers"
},
"downloadImage": {
"title": "Télécharger l'image",
"desc": "Télécharger la toile actuelle"
},
"undoStroke": {
"title": "Annuler le trait",
"desc": "Annuler un coup de pinceau"
},
"redoStroke": {
"title": "Rétablir le trait",
"desc": "Rétablir un coup de pinceau"
},
"resetView": {
"title": "Réinitialiser la vue",
"desc": "Réinitialiser la vue de la toile"
},
"previousStagingImage": {
"title": "Image de mise en scène précédente",
"desc": "Image précédente de la zone de mise en scène"
},
"nextStagingImage": {
"title": "Image de mise en scène suivante",
"desc": "Image suivante de la zone de mise en scène"
},
"acceptStagingImage": {
"title": "Accepter l'image de mise en scène",
"desc": "Accepter l'image actuelle de la zone de mise en scène"
}
}
{}
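
These per-language JSON files are the web UI's i18next resource bundles: each file acts as a namespace (hotkeys, modelmanager, and so on), and nested keys are resolved with dot notation. Below is a minimal TypeScript sketch of that lookup, assuming react-i18next-style wiring and reusing one entry from the French bundle above; the bootstrap code is illustrative, not the frontend's actual setup.

import i18next from 'i18next';

// Register the French "hotkeys" bundle inline and resolve one nested entry.
// Namespace and key names mirror this diff; the init options are assumptions.
i18next
  .init({
    lng: 'fr',
    resources: {
      fr: {
        hotkeys: {
          selectEraser: {
            title: 'Sélectionner une gomme',
            desc: 'Sélectionne la gomme de la toile',
          },
        },
      },
    },
  })
  .then((t) => {
    // Nested keys are addressed as "<namespace>:<dotted.path>".
    console.log(t('hotkeys:selectEraser.title')); // "Sélectionner une gomme"
  });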

View File

@@ -1,207 +0,0 @@
{
"keyboardShortcuts": "Клавіатурні скорочення",
"appHotkeys": "Гарячі клавіші програми",
"generalHotkeys": "Загальні гарячі клавіші",
"galleryHotkeys": "Гарячі клавіші галереї",
"unifiedCanvasHotkeys": "Гарячі клавіші універсального полотна",
"invoke": {
"title": "Invoke",
"desc": "Згенерувати зображення"
},
"cancel": {
"title": "Скасувати",
"desc": "Скасувати генерацію зображення"
},
"focusPrompt": {
"title": "Переключитися на введення запиту",
"desc": "Перемикання на область введення запиту"
},
"toggleOptions": {
"title": "Показати/приховати параметри",
"desc": "Відкривати і закривати панель параметрів"
},
"pinOptions": {
"title": "Закріпити параметри",
"desc": "Закріпити панель параметрів"
},
"toggleViewer": {
"title": "Показати перегляд",
"desc": "Відкривати і закривати переглядач зображень"
},
"toggleGallery": {
"title": "Показати галерею",
"desc": "Відкривати і закривати скриньку галереї"
},
"maximizeWorkSpace": {
"title": "Максимізувати робочий простір",
"desc": "Приховати панелі і максимізувати робочу область"
},
"changeTabs": {
"title": "Переключити вкладку",
"desc": "Переключитися на іншу робочу область"
},
"consoleToggle": {
"title": "Показати консоль",
"desc": "Відкривати і закривати консоль"
},
"setPrompt": {
"title": "Використовувати запит",
"desc": "Використати запит із поточного зображення"
},
"setSeed": {
"title": "Використовувати сід",
"desc": "Використовувати сід поточного зображення"
},
"setParameters": {
"title": "Використовувати всі параметри",
"desc": "Використовувати всі параметри поточного зображення"
},
"restoreFaces": {
"title": "Відновити обличчя",
"desc": "Відновити обличчя на поточному зображенні"
},
"upscale": {
"title": "Збільшення",
"desc": "Збільшити поточне зображення"
},
"showInfo": {
"title": "Показати метадані",
"desc": "Показати метадані з поточного зображення"
},
"sendToImageToImage": {
"title": "Відправити в img2img",
"desc": "Надіслати поточне зображення в Image To Image"
},
"deleteImage": {
"title": "Видалити зображення",
"desc": "Видалити поточне зображення"
},
"closePanels": {
"title": "Закрити панелі",
"desc": "Закриває відкриті панелі"
},
"previousImage": {
"title": "Попереднє зображення",
"desc": "Відображати попереднє зображення в галереї"
},
"nextImage": {
"title": "Наступне зображення",
"desc": "Відображення наступного зображення в галереї"
},
"toggleGalleryPin": {
"title": "Закріпити галерею",
"desc": "Закріплює і відкріплює галерею"
},
"increaseGalleryThumbSize": {
"title": "Збільшити розмір мініатюр галереї",
"desc": "Збільшує розмір мініатюр галереї"
},
"reduceGalleryThumbSize": {
"title": "Зменшує розмір мініатюр галереї",
"desc": "Зменшує розмір мініатюр галереї"
},
"selectBrush": {
"title": "Вибрати пензель",
"desc": "Вибирає пензель для полотна"
},
"selectEraser": {
"title": "Вибрати ластик",
"desc": "Вибирає ластик для полотна"
},
"reduceBrushSize": {
"title": "Зменшити розмір пензля",
"desc": "Зменшує розмір пензля/ластика полотна"
},
"increaseBrushSize": {
"title": "Збільшити розмір пензля",
"desc": "Збільшує розмір пензля/ластика полотна"
},
"reduceBrushOpacity": {
"title": "Зменшити непрозорість пензля",
"desc": "Зменшує непрозорість пензля полотна"
},
"increaseBrushOpacity": {
"title": "Збільшити непрозорість пензля",
"desc": "Збільшує непрозорість пензля полотна"
},
"moveTool": {
"title": "Інструмент переміщення",
"desc": "Дозволяє переміщатися по полотну"
},
"fillBoundingBox": {
"title": "Заповнити обмежувальну рамку",
"desc": "Заповнює обмежувальну рамку кольором пензля"
},
"eraseBoundingBox": {
"title": "Стерти обмежувальну рамку",
"desc": "Стирає область обмежувальної рамки"
},
"colorPicker": {
"title": "Вибрати колір",
"desc": "Вибирає засіб вибору кольору полотна"
},
"toggleSnap": {
"title": "Увімкнути прив'язку",
"desc": "Вмикає/вимикає прив'язку до сітки"
},
"quickToggleMove": {
"title": "Швидке перемикання переміщення",
"desc": "Тимчасово перемикає режим переміщення"
},
"toggleLayer": {
"title": "Переключити шар",
"desc": "Перемикання маски/базового шару"
},
"clearMask": {
"title": "Очистити маску",
"desc": "Очистити всю маску"
},
"hideMask": {
"title": "Приховати маску",
"desc": "Приховує/показує маску"
},
"showHideBoundingBox": {
"title": "Показати/приховати обмежувальну рамку",
"desc": "Переключити видимість обмежувальної рамки"
},
"mergeVisible": {
"title": "Об'єднати видимі",
"desc": "Об'єднати всі видимі шари полотна"
},
"saveToGallery": {
"title": "Зберегти в галерею",
"desc": "Зберегти поточне полотно в галерею"
},
"copyToClipboard": {
"title": "Копіювати в буфер обміну",
"desc": "Копіювати поточне полотно в буфер обміну"
},
"downloadImage": {
"title": "Завантажити зображення",
"desc": "Завантажити вміст полотна"
},
"undoStroke": {
"title": "Скасувати пензель",
"desc": "Скасувати мазок пензля"
},
"redoStroke": {
"title": "Повторити мазок пензля",
"desc": "Повторити мазок пензля"
},
"resetView": {
"title": "Вид за замовчуванням",
"desc": "Скинути вид полотна"
},
"previousStagingImage": {
"title": "Попереднє зображення",
"desc": "Попереднє зображення"
},
"nextStagingImage": {
"title": "Наступне зображення",
"desc": "Наступне зображення"
},
"acceptStagingImage": {
"title": "Прийняти зображення",
"desc": "Прийняти поточне зображення"
}
}

View File

@@ -1,68 +0,0 @@
{
"modelManager": "Gestionnaire de modèle",
"model": "Modèle",
"allModels": "Tous les modèles",
"checkpointModels": "Points de contrôle",
"diffusersModels": "Diffuseurs",
"safetensorModels": "SafeTensors",
"modelAdded": "Modèle ajouté",
"modelUpdated": "Modèle mis à jour",
"modelEntryDeleted": "Entrée de modèle supprimée",
"cannotUseSpaces": "Ne peut pas utiliser d'espaces",
"addNew": "Ajouter un nouveau",
"addNewModel": "Ajouter un nouveau modèle",
"addCheckpointModel": "Ajouter un modèle de point de contrôle / SafeTensor",
"addDiffuserModel": "Ajouter des diffuseurs",
"addManually": "Ajouter manuellement",
"manual": "Manuel",
"name": "Nom",
"nameValidationMsg": "Entrez un nom pour votre modèle",
"description": "Description",
"descriptionValidationMsg": "Ajoutez une description pour votre modèle",
"config": "Config",
"configValidationMsg": "Chemin vers le fichier de configuration de votre modèle.",
"modelLocation": "Emplacement du modèle",
"modelLocationValidationMsg": "Chemin vers où votre modèle est situé localement.",
"repo_id": "ID de dépôt",
"repoIDValidationMsg": "Dépôt en ligne de votre modèle",
"vaeLocation": "Emplacement VAE",
"vaeLocationValidationMsg": "Chemin vers où votre VAE est situé.",
"vaeRepoID": "ID de dépôt VAE",
"vaeRepoIDValidationMsg": "Dépôt en ligne de votre VAE",
"width": "Largeur",
"widthValidationMsg": "Largeur par défaut de votre modèle.",
"height": "Hauteur",
"heightValidationMsg": "Hauteur par défaut de votre modèle.",
"addModel": "Ajouter un modèle",
"updateModel": "Mettre à jour le modèle",
"availableModels": "Modèles disponibles",
"search": "Rechercher",
"load": "Charger",
"active": "actif",
"notLoaded": "non chargé",
"cached": "en cache",
"checkpointFolder": "Dossier de point de contrôle",
"clearCheckpointFolder": "Effacer le dossier de point de contrôle",
"findModels": "Trouver des modèles",
"scanAgain": "Scanner à nouveau",
"modelsFound": "Modèles trouvés",
"selectFolder": "Sélectionner un dossier",
"selected": "Sélectionné",
"selectAll": "Tout sélectionner",
"deselectAll": "Tout désélectionner",
"showExisting": "Afficher existant",
"addSelected": "Ajouter sélectionné",
"modelExists": "Modèle existant",
"selectAndAdd": "Sélectionner et ajouter les modèles listés ci-dessous",
"noModelsFound": "Aucun modèle trouvé",
"delete": "Supprimer",
"deleteModel": "Supprimer le modèle",
"deleteConfig": "Supprimer la configuration",
"deleteMsg1": "Êtes-vous sûr de vouloir supprimer cette entrée de modèle dans InvokeAI?",
"deleteMsg2": "Cela n'effacera pas le fichier de point de contrôle du modèle de votre disque. Vous pouvez les réajouter si vous le souhaitez.",
"formMessageDiffusersModelLocation": "Emplacement du modèle de diffuseurs",
"formMessageDiffusersModelLocationDesc": "Veuillez en entrer au moins un.",
"formMessageDiffusersVAELocation": "Emplacement VAE",
"formMessageDiffusersVAELocationDesc": "Si non fourni, InvokeAI recherchera le fichier VAE à l'emplacement du modèle donné ci-dessus."
}

View File

@@ -1,34 +1,24 @@
{
"modelManager": "Gestione Modelli",
"model": "Modello",
"allModels": "Tutti i Modelli",
"checkpointModels": "Checkpoint",
"diffusersModels": "Diffusori",
"safetensorModels": "SafeTensor",
"modelAdded": "Modello Aggiunto",
"modelUpdated": "Modello Aggiornato",
"modelEntryDeleted": "Modello Rimosso",
"cannotUseSpaces": "Impossibile utilizzare gli spazi",
"addNew": "Aggiungi nuovo",
"addNewModel": "Aggiungi nuovo Modello",
"addCheckpointModel": "Aggiungi modello Checkpoint / Safetensor",
"addDiffuserModel": "Aggiungi Diffusori",
"addManually": "Aggiungi manualmente",
"manual": "Manuale",
"name": "Nome",
"nameValidationMsg": "Inserisci un nome per il modello",
"description": "Descrizione",
"descriptionValidationMsg": "Aggiungi una descrizione per il modello",
"config": "Configurazione",
"config": "Config",
"configValidationMsg": "Percorso del file di configurazione del modello.",
"modelLocation": "Posizione del modello",
"modelLocationValidationMsg": "Percorso dove si trova il modello.",
"repo_id": "Repo ID",
"repoIDValidationMsg": "Repository online del modello",
"vaeLocation": "Posizione file VAE",
"vaeLocationValidationMsg": "Percorso dove si trova il file VAE.",
"vaeRepoID": "VAE Repo ID",
"vaeRepoIDValidationMsg": "Repository online del file VAE",
"width": "Larghezza",
"widthValidationMsg": "Larghezza predefinita del modello.",
"height": "Altezza",
@@ -59,9 +49,5 @@
"deleteModel": "Elimina modello",
"deleteConfig": "Elimina configurazione",
"deleteMsg1": "Sei sicuro di voler eliminare questo modello da InvokeAI?",
"deleteMsg2": "Questo non eliminerà il file Checkpoint del modello dal tuo disco. Puoi aggiungerlo nuovamente se lo desideri.",
"formMessageDiffusersModelLocation": "Ubicazione modelli diffusori",
"formMessageDiffusersModelLocationDesc": "Inseriscine almeno uno.",
"formMessageDiffusersVAELocation": "Ubicazione file VAE",
"formMessageDiffusersVAELocationDesc": "Se non fornito, InvokeAI cercherà il file VAE all'interno dell'ubicazione del modello sopra indicata."
"deleteMsg2": "Questo non eliminerà il file Checkpoint del modello dal tuo disco. Puoi aggiungerlo nuovamente se lo desideri."
}

View File

@@ -1,53 +0,0 @@
{
"modelManager": "Менеджер моделей",
"model": "Модель",
"modelAdded": "Модель додана",
"modelUpdated": "Модель оновлена",
"modelEntryDeleted": "Запис про модель видалено",
"cannotUseSpaces": "Не можна використовувати пробіли",
"addNew": "Додати нову",
"addNewModel": "Додати нову модель",
"addManually": "Додати вручну",
"manual": "Ручне",
"name": "Назва",
"nameValidationMsg": "Введіть назву моделі",
"description": "Опис",
"descriptionValidationMsg": "Введіть опис моделі",
"config": "Файл конфігурації",
"configValidationMsg": "Шлях до файлу конфігурації",
"modelLocation": "Розташування моделі",
"modelLocationValidationMsg": "Шлях до файлу з моделлю",
"vaeLocation": "Розтышування VAE",
"vaeLocationValidationMsg": "Шлях до VAE",
"width": "Ширина",
"widthValidationMsg": "Початкова ширина зображень",
"height": "Висота",
"heightValidationMsg": "Початкова висота зображень",
"addModel": "Додати модель",
"updateModel": "Оновити модель",
"availableModels": "Доступні моделі",
"search": "Шукати",
"load": "Завантажити",
"active": "активна",
"notLoaded": "не завантажена",
"cached": "кешована",
"checkpointFolder": "Папка з моделями",
"clearCheckpointFolder": "Очистити папку з моделями",
"findModels": "Знайти моделі",
"scanAgain": "Сканувати знову",
"modelsFound": "Знайдені моделі",
"selectFolder": "Обрати папку",
"selected": "Обрані",
"selectAll": "Обрати всі",
"deselectAll": "Зняти выділення",
"showExisting": "Показувати додані",
"addSelected": "Додати обрані",
"modelExists": "Модель вже додана",
"selectAndAdd": "Оберіть і додайте моделі із списку",
"noModelsFound": "Моделі не знайдені",
"delete": "Видалити",
"deleteModel": "Видалити модель",
"deleteConfig": "Видалити конфігурацію",
"deleteMsg1": "Ви точно хочете видалити модель із InvokeAI?",
"deleteMsg2": "Це не призведе до видалення файлу моделі з диску. Позніше ви можете додати його знову."
}

View File

@@ -24,7 +24,6 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",

View File

@@ -24,7 +24,6 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",
@@ -44,7 +43,6 @@
"invoke": "Invoke",
"cancel": "Cancel",
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
"negativePrompts": "Negative Prompts",
"sendTo": "Send to",
"sendToImg2Img": "Send to Image to Image",
"sendToUnifiedCanvas": "Send To Unified Canvas",

View File

@@ -40,7 +40,7 @@
"infillScalingHeader": "Riempimento e ridimensionamento",
"img2imgStrength": "Forza da Immagine a Immagine",
"toggleLoopback": "Attiva/disattiva elaborazione ricorsiva",
"invoke": "Invoke",
"invoke": "Invoca",
"cancel": "Annulla",
"promptPlaceholder": "Digita qui il prompt usando termini in lingua inglese. [token negativi], (aumenta il peso)++, (diminuisci il peso)--, scambia e fondi sono disponibili (consulta la documentazione)",
"sendTo": "Invia a",

View File

@@ -1,62 +0,0 @@
{
"images": "Images",
"steps": "Etapes",
"cfgScale": "CFG Echelle",
"width": "Largeur",
"height": "Hauteur",
"sampler": "Echantillonneur",
"seed": "Graine",
"randomizeSeed": "Graine Aléatoire",
"shuffle": "Mélanger",
"noiseThreshold": "Seuil de Bruit",
"perlinNoise": "Bruit de Perlin",
"variations": "Variations",
"variationAmount": "Montant de Variation",
"seedWeights": "Poids des Graines",
"faceRestoration": "Restauration de Visage",
"restoreFaces": "Restaurer les Visages",
"type": "Type",
"strength": "Force",
"upscaling": "Agrandissement",
"upscale": "Agrandir",
"upscaleImage": "Image en Agrandissement",
"scale": "Echelle",
"otherOptions": "Autres Options",
"seamlessTiling": "Carreau Sans Joint",
"hiresOptim": "Optimisation Haute Résolution",
"imageFit": "Ajuster Image Initiale à la Taille de Sortie",
"codeformerFidelity": "Fidélité",
"seamSize": "Taille des Joints",
"seamBlur": "Flou des Joints",
"seamStrength": "Force des Joints",
"seamSteps": "Etapes des Joints",
"scaleBeforeProcessing": "Echelle Avant Traitement",
"scaledWidth": "Larg. Échelle",
"scaledHeight": "Haut. Échelle",
"infillMethod": "Méthode de Remplissage",
"tileSize": "Taille des Tuiles",
"boundingBoxHeader": "Boîte Englobante",
"seamCorrectionHeader": "Correction des Joints",
"infillScalingHeader": "Remplissage et Mise à l'Échelle",
"img2imgStrength": "Force de l'Image à l'Image",
"toggleLoopback": "Activer/Désactiver la Boucle",
"invoke": "Invoker",
"cancel": "Annuler",
"promptPlaceholder": "Tapez le prompt ici. [tokens négatifs], (poids positif)++, (poids négatif)--, swap et blend sont disponibles (voir les docs)",
"sendTo": "Envoyer à",
"sendToImg2Img": "Envoyer à Image à Image",
"sendToUnifiedCanvas": "Envoyer au Canvas Unifié",
"copyImage": "Copier Image",
"copyImageToLink": "Copier l'Image en Lien",
"downloadImage": "Télécharger Image",
"openInViewer": "Ouvrir dans le visualiseur",
"closeViewer": "Fermer le visualiseur",
"usePrompt": "Utiliser la suggestion",
"useSeed": "Utiliser la graine",
"useAll": "Tout utiliser",
"useInitImg": "Utiliser l'image initiale",
"info": "Info",
"deleteImage": "Supprimer l'image",
"initialImage": "Image initiale",
"showOptionsPanel": "Afficher le panneau d'options"
}

View File

@@ -1,62 +0,0 @@
{
"images": "Зображення",
"steps": "Кроки",
"cfgScale": "Рівень CFG",
"width": "Ширина",
"height": "Висота",
"sampler": "Семплер",
"seed": "Сід",
"randomizeSeed": "Випадковий сид",
"shuffle": "Оновити",
"noiseThreshold": "Поріг шуму",
"perlinNoise": "Шум Перліна",
"variations": "Варіації",
"variationAmount": "Кількість варіацій",
"seedWeights": "Вага сіду",
"faceRestoration": "Відновлення облич",
"restoreFaces": "Відновити обличчя",
"type": "Тип",
"strength": "Сила",
"upscaling": "Збільшення",
"upscale": "Збільшити",
"upscaleImage": "Збільшити зображення",
"scale": "Масштаб",
"otherOptions": "інші параметри",
"seamlessTiling": "Безшовний узор",
"hiresOptim": "Висока роздільна здатність",
"imageFit": "Вмістити зображення",
"codeformerFidelity": "Точність",
"seamSize": "Размір шву",
"seamBlur": "Розмиття шву",
"seamStrength": "Сила шву",
"seamSteps": "Кроки шву",
"inpaintReplace": "Inpaint-заміна",
"scaleBeforeProcessing": "Масштабувати",
"scaledWidth": "Масштаб Ш",
"scaledHeight": "Масштаб В",
"infillMethod": "Засіб заповнення",
"tileSize": "Розмір області",
"boundingBoxHeader": "Обмежуюча рамка",
"seamCorrectionHeader": "Налаштування шву",
"infillScalingHeader": "Заповнення і масштабування",
"img2imgStrength": "Сила обробки img2img",
"toggleLoopback": "Зациклити обробку",
"invoke": "Викликати",
"cancel": "Скасувати",
"promptPlaceholder": "Введіть запит тут (англійською). [видалені токени], (більш вагомі)++, (менш вагомі)--, swap и blend також доступні (дивіться Github)",
"sendTo": "Надіслати",
"sendToImg2Img": "Надіслати у img2img",
"sendToUnifiedCanvas": "Надіслати на полотно",
"copyImageToLink": "Скопіювати посилання",
"downloadImage": "Завантажити",
"openInViewer": "Відкрити у переглядачі",
"closeViewer": "Закрити переглядач",
"usePrompt": "Використати запит",
"useSeed": "Використати сід",
"useAll": "Використати все",
"useInitImg": "Використати як початкове",
"info": "Метадані",
"deleteImage": "Видалити зображення",
"initialImage": "Початкове зображення",
"showOptionsPanel": "Показати панель налаштувань"
}

View File

@@ -1,13 +1 @@
{
"models": "Modèles",
"displayInProgress": "Afficher les images en cours",
"saveSteps": "Enregistrer les images tous les n étapes",
"confirmOnDelete": "Confirmer la suppression",
"displayHelpIcons": "Afficher les icônes d'aide",
"useCanvasBeta": "Utiliser la mise en page bêta de Canvas",
"enableImageDebugging": "Activer le débogage d'image",
"resetWebUI": "Réinitialiser l'interface Web",
"resetWebUIDesc1": "Réinitialiser l'interface Web ne réinitialise que le cache local du navigateur de vos images et de vos paramètres enregistrés. Cela n'efface pas les images du disque.",
"resetWebUIDesc2": "Si les images ne s'affichent pas dans la galerie ou si quelque chose d'autre ne fonctionne pas, veuillez essayer de réinitialiser avant de soumettre une demande sur GitHub.",
"resetComplete": "L'interface Web a été réinitialisée. Rafraîchissez la page pour recharger."
}
{}

View File

@@ -1,13 +0,0 @@
{
"models": "Моделі",
"displayInProgress": "Показувати процес генерації",
"saveSteps": "Зберігати кожні n кроків",
"confirmOnDelete": "Підтверджувати видалення",
"displayHelpIcons": "Показувати значки підказок",
"useCanvasBeta": "Показувати інструменты зліва (Beta UI)",
"enableImageDebugging": "Увімкнути налагодження",
"resetWebUI": "Повернути початкові",
"resetWebUIDesc1": "Скидання настройок веб-інтерфейсу видаляє лише локальний кеш браузера з вашими зображеннями та налаштуваннями. Це не призводить до видалення зображень з диску.",
"resetWebUIDesc2": "Якщо зображення не відображаються в галереї або не працює ще щось, спробуйте скинути налаштування, перш ніж повідомляти про проблему на GitHub.",
"resetComplete": "Інтерфейс скинуто. Оновіть цю сторінку."
}

View File

@@ -1,32 +1 @@
{
"tempFoldersEmptied": "Dossiers temporaires vidés",
"uploadFailed": "Téléchargement échoué",
"uploadFailedMultipleImagesDesc": "Plusieurs images collées, peut uniquement télécharger une image à la fois",
"uploadFailedUnableToLoadDesc": "Impossible de charger le fichier",
"downloadImageStarted": "Téléchargement de l'image démarré",
"imageCopied": "Image copiée",
"imageLinkCopied": "Lien d'image copié",
"imageNotLoaded": "Aucune image chargée",
"imageNotLoadedDesc": "Aucune image trouvée pour envoyer à module d'image",
"imageSavedToGallery": "Image enregistrée dans la galerie",
"canvasMerged": "Canvas fusionné",
"sentToImageToImage": "Envoyé à Image à Image",
"sentToUnifiedCanvas": "Envoyé à Canvas unifié",
"parametersSet": "Paramètres définis",
"parametersNotSet": "Paramètres non définis",
"parametersNotSetDesc": "Aucune métadonnée trouvée pour cette image.",
"parametersFailed": "Problème de chargement des paramètres",
"parametersFailedDesc": "Impossible de charger l'image d'initiation.",
"seedSet": "Graine définie",
"seedNotSet": "Graine non définie",
"seedNotSetDesc": "Impossible de trouver la graine pour cette image.",
"promptSet": "Invite définie",
"promptNotSet": "Invite non définie",
"promptNotSetDesc": "Impossible de trouver l'invite pour cette image.",
"upscalingFailed": "Échec de la mise à l'échelle",
"faceRestoreFailed": "Échec de la restauration du visage",
"metadataLoadFailed": "Échec du chargement des métadonnées",
"initialImageSet": "Image initiale définie",
"initialImageNotSet": "Image initiale non définie",
"initialImageNotSetDesc": "Impossible de charger l'image initiale"
}
{}

View File

@@ -1,32 +0,0 @@
{
"tempFoldersEmptied": "Тимчасова папка очищена",
"uploadFailed": "Не вдалося завантажити",
"uploadFailedMultipleImagesDesc": "Можна вставити лише одне зображення (ви спробували вставити декілька)",
"uploadFailedUnableToLoadDesc": "Неможливо завантажити файл",
"downloadImageStarted": "Завантаження зображення почалося",
"imageCopied": "Зображення скопійоване",
"imageLinkCopied": "Посилання на зображення скопійовано",
"imageNotLoaded": "Зображення не завантажено",
"imageNotLoadedDesc": "Не знайдено зображення для надсилання до img2img",
"imageSavedToGallery": "Зображення збережено в галерею",
"canvasMerged": "Полотно об'єднане",
"sentToImageToImage": "Надіслати до img2img",
"sentToUnifiedCanvas": "Надіслати на полотно",
"parametersSet": "Параметри задані",
"parametersNotSet": "Параметри не задані",
"parametersNotSetDesc": "Не знайдені метадані цього зображення",
"parametersFailed": "Проблема із завантаженням параметрів",
"parametersFailedDesc": "Неможливо завантажити початкове зображення",
"seedSet": "Сід заданий",
"seedNotSet": "Сід не заданий",
"seedNotSetDesc": "Не вдалося знайти сід для зображення",
"promptSet": "Запит заданий",
"promptNotSet": "Запит не заданий",
"promptNotSetDesc": "Не вдалося знайти запит для зображення",
"upscalingFailed": "Збільшення не вдалося",
"faceRestoreFailed": "Відновлення облич не вдалося",
"metadataLoadFailed": "Не вдалося завантажити метадані",
"initialImageSet": "Початкове зображення задане",
"initialImageNotSet": "Початкове зображення не задане",
"initialImageNotSetDesc": "Не вдалося завантажити початкове зображення"
}

View File

@@ -1,15 +0,0 @@
{
"feature": {
"prompt": "Ceci est le champ prompt. Le prompt inclut des objets de génération et des termes stylistiques. Vous pouvez également ajouter un poids (importance du jeton) dans le prompt, mais les commandes CLI et les paramètres ne fonctionneront pas.",
"gallery": "La galerie affiche les générations à partir du dossier de sortie à mesure qu'elles sont créées. Les paramètres sont stockés dans des fichiers et accessibles via le menu contextuel.",
"other": "Ces options activent des modes de traitement alternatifs pour Invoke. 'Tuilage seamless' créera des motifs répétitifs dans la sortie. 'Haute résolution' est la génération en deux étapes avec img2img: utilisez ce paramètre lorsque vous souhaitez une image plus grande et plus cohérente sans artefacts. Cela prendra plus de temps que d'habitude txt2img.",
"seed": "La valeur de grain affecte le bruit initial à partir duquel l'image est formée. Vous pouvez utiliser les graines déjà existantes provenant d'images précédentes. 'Seuil de bruit' est utilisé pour atténuer les artefacts à des valeurs CFG élevées (essayez la plage de 0 à 10), et Perlin pour ajouter du bruit Perlin pendant la génération: les deux servent à ajouter de la variété à vos sorties.",
"variations": "Essayez une variation avec une valeur comprise entre 0,1 et 1,0 pour changer le résultat pour une graine donnée. Des variations intéressantes de la graine sont entre 0,1 et 0,3.",
"upscale": "Utilisez ESRGAN pour agrandir l'image immédiatement après la génération.",
"faceCorrection": "Correction de visage avec GFPGAN ou Codeformer: l'algorithme détecte les visages dans l'image et corrige tout défaut. La valeur élevée changera plus l'image, ce qui donnera des visages plus attirants. Codeformer avec une fidélité plus élevée préserve l'image originale au prix d'une correction de visage plus forte.",
"imageToImage": "Image to Image charge n'importe quelle image en tant qu'initiale, qui est ensuite utilisée pour générer une nouvelle avec le prompt. Plus la valeur est élevée, plus l'image de résultat changera. Des valeurs de 0,0 à 1,0 sont possibles, la plage recommandée est de 0,25 à 0,75",
"boundingBox": "La boîte englobante est la même que les paramètres Largeur et Hauteur pour Texte à Image ou Image à Image. Seulement la zone dans la boîte sera traitée.",
"seamCorrection": "Contrôle la gestion des coutures visibles qui se produisent entre les images générées sur la toile.",
"infillAndScaling": "Gérer les méthodes de remplissage (utilisées sur les zones masquées ou effacées de la toile) et le redimensionnement (utile pour les petites tailles de boîte englobante)."
}
}

View File

@@ -1,15 +0,0 @@
{
"feature": {
"prompt": "Це поле для тексту запиту, включаючи об'єкти генерації та стилістичні терміни. У запит можна включити і коефіцієнти ваги (значущості токена), але консольні команди та параметри не працюватимуть.",
"gallery": "Тут відображаються генерації з папки outputs у міру їх появи.",
"other": "Ці опції включають альтернативні режими обробки для Invoke. 'Безшовний узор' створить на виході узори, що повторюються. 'Висока роздільна здатність' - це генерація у два етапи за допомогою img2img: використовуйте це налаштування, коли хочете отримати цільне зображення більшого розміру без артефактів.",
"seed": "Значення сіду впливає на початковий шум, з якого сформується зображення. Можна використовувати вже наявний сід із попередніх зображень. 'Поріг шуму' використовується для пом'якшення артефактів при високих значеннях CFG (спробуйте в діапазоні 0-10), а 'Перлін' - для додавання шуму Перліна в процесі генерації: обидва параметри служать для більшої варіативності результатів.",
"variations": "Спробуйте варіацію зі значенням від 0.1 до 1.0, щоб змінити результат для заданого сиду. Цікаві варіації сиду знаходяться між 0.1 і 0.3.",
"upscale": "Використовуйте ESRGAN, щоб збільшити зображення відразу після генерації.",
"faceCorrection": "Корекція облич за допомогою GFPGAN або Codeformer: алгоритм визначає обличчя у готовому зображенні та виправляє будь-які дефекти. Високі значення сили змінюють зображення сильніше, в результаті обличчя будуть виглядати привабливіше. У Codeformer більш висока точність збереже вихідне зображення на шкоду корекції обличчя.",
"imageToImage": "'Зображення до зображення' завантажує будь-яке зображення, яке потім використовується для генерації разом із запитом. Чим більше значення, тим сильніше зміниться зображення в результаті. Можливі значення від 0 до 1, рекомендується діапазон 0.25-0.75",
"boundingBox": "'Обмежуюча рамка' аналогічна налаштуванням 'Ширина' і 'Висота' для 'Зображення з тексту' або 'Зображення до зображення'. Буде оброблена тільки область у рамці.",
"seamCorrection": "Керування обробкою видимих швів, що виникають між зображеннями на полотні.",
"infillAndScaling": "Керування методами заповнення (використовується для масок або стертих частин полотна) та масштабування (корисно для малих розмірів обмежуючої рамки)."
}
}

View File

@@ -1,59 +1 @@
{
"layer": "Couche",
"base": "Base",
"mask": "Masque",
"maskingOptions": "Options de masquage",
"enableMask": "Activer le masque",
"preserveMaskedArea": "Préserver la zone masquée",
"clearMask": "Effacer le masque",
"brush": "Pinceau",
"eraser": "Gomme",
"fillBoundingBox": "Remplir la boîte englobante",
"eraseBoundingBox": "Effacer la boîte englobante",
"colorPicker": "Sélecteur de couleur",
"brushOptions": "Options de pinceau",
"brushSize": "Taille",
"move": "Déplacer",
"resetView": "Réinitialiser la vue",
"mergeVisible": "Fusionner les visibles",
"saveToGallery": "Enregistrer dans la galerie",
"copyToClipboard": "Copier dans le presse-papiers",
"downloadAsImage": "Télécharger en tant qu'image",
"undo": "Annuler",
"redo": "Refaire",
"clearCanvas": "Effacer le canvas",
"canvasSettings": "Paramètres du canvas",
"showIntermediates": "Afficher les intermédiaires",
"showGrid": "Afficher la grille",
"snapToGrid": "Aligner sur la grille",
"darkenOutsideSelection": "Assombrir à l'extérieur de la sélection",
"autoSaveToGallery": "Enregistrement automatique dans la galerie",
"saveBoxRegionOnly": "Enregistrer uniquement la région de la boîte",
"limitStrokesToBox": "Limiter les traits à la boîte",
"showCanvasDebugInfo": "Afficher les informations de débogage du canvas",
"clearCanvasHistory": "Effacer l'historique du canvas",
"clearHistory": "Effacer l'historique",
"clearCanvasHistoryMessage": "Effacer l'historique du canvas laisse votre canvas actuel intact, mais efface de manière irréversible l'historique annuler et refaire.",
"clearCanvasHistoryConfirm": "Êtes-vous sûr de vouloir effacer l'historique du canvas?",
"emptyTempImageFolder": "Vider le dossier d'images temporaires",
"emptyFolder": "Vider le dossier",
"emptyTempImagesFolderMessage": "Vider le dossier d'images temporaires réinitialise également complètement le canvas unifié. Cela inclut tout l'historique annuler/refaire, les images dans la zone de mise en attente et la couche de base du canvas.",
"emptyTempImagesFolderConfirm": "Êtes-vous sûr de vouloir vider le dossier temporaire?",
"activeLayer": "Calque actif",
"canvasScale": "Échelle du canevas",
"boundingBox": "Boîte englobante",
"scaledBoundingBox": "Boîte englobante mise à l'échelle",
"boundingBoxPosition": "Position de la boîte englobante",
"canvasDimensions": "Dimensions du canevas",
"canvasPosition": "Position du canevas",
"cursorPosition": "Position du curseur",
"previous": "Précédent",
"next": "Suivant",
"accept": "Accepter",
"showHide": "Afficher/Masquer",
"discardAll": "Tout abandonner",
"betaClear": "Effacer",
"betaDarkenOutside": "Assombrir à l'extérieur",
"betaLimitToBox": "Limiter à la boîte",
"betaPreserveMasked": "Conserver masqué"
}
{}

View File

@@ -1,5 +1,5 @@
{
"layer": "Livello",
"layer": "Layer",
"base": "Base",
"mask": "Maschera",
"maskingOptions": "Opzioni di mascheramento",

View File

@@ -1,59 +0,0 @@
{
"layer": "Шар",
"base": "Базовий",
"mask": "Маска",
"maskingOptions": "Параметри маски",
"enableMask": "Увiмкнути маску",
"preserveMaskedArea": "Зберiгати замасковану область",
"clearMask": "Очистити маску",
"brush": "Пензель",
"eraser": "Гумка",
"fillBoundingBox": "Заповнити обмежуючу рамку",
"eraseBoundingBox": "Стерти обмежуючу рамку",
"colorPicker": "Пiпетка",
"brushOptions": "Параметри пензля",
"brushSize": "Розмiр",
"move": еремiстити",
"resetView": "Скинути вигляд",
"mergeVisible": "Об'єднати видимi",
"saveToGallery": "Зберегти до галереї",
"copyToClipboard": "Копiювати до буферу обмiну",
"downloadAsImage": "Завантажити як зображення",
"undo": "Вiдмiнити",
"redo": "Повторити",
"clearCanvas": "Очистити полотно",
"canvasSettings": "Налаштування полотна",
"showIntermediates": "Показувати процес",
"showGrid": "Показувати сiтку",
"snapToGrid": "Прив'язати до сітки",
"darkenOutsideSelection": "Затемнити полотно зовні",
"autoSaveToGallery": "Автозбереження до галереї",
"saveBoxRegionOnly": "Зберiгати тiльки видiлення",
"limitStrokesToBox": "Обмежити штрихи виділенням",
"showCanvasDebugInfo": "Показати налаштування полотна",
"clearCanvasHistory": "Очистити iсторiю полотна",
"clearHistory": "Очистити iсторiю",
"clearCanvasHistoryMessage": "Очищення історії полотна залишає поточне полотно незайманим, але видаляє історію скасування та повтору",
"clearCanvasHistoryConfirm": "Ви впевнені, що хочете очистити історію полотна?",
"emptyTempImageFolder": "Очистити тимчасову папку",
"emptyFolder": "Очистити папку",
"emptyTempImagesFolderMessage": "Очищення папки тимчасових зображень також повністю скидає полотно, включаючи всю історію скасування/повтору, зображення та базовий шар полотна, що розміщуються.",
"emptyTempImagesFolderConfirm": "Ви впевнені, що хочете очистити тимчасову папку?",
"activeLayer": "Активний шар",
"canvasScale": "Масштаб полотна",
"boundingBox": "Обмежуюча рамка",
"scaledBoundingBox": "Масштабування рамки",
"boundingBoxPosition": "Позиція обмежуючої рамки",
"canvasDimensions": "Разміри полотна",
"canvasPosition": "Розташування полотна",
"cursorPosition": "Розташування курсора",
"previous": "Попереднє",
"next": "Наступне",
"принять": "Приняти",
"showHide": "Показати/Сховати",
"discardAll": "Відмінити все",
"betaClear": "Очистити",
"betaDarkenOutside": "Затемнити зовні",
"betaLimitToBox": "Обмежити виділенням",
"betaPreserveMasked": "Зберiгати замасковану область"
}

View File

@@ -0,0 +1,23 @@
{
"eslintConfig": {
"extends": [
"eslint:recommended",
"plugin:@typescript-eslint/recommended",
"plugin:react-hooks/recommended"
],
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint", "eslint-plugin-react-hooks"],
"root": true,
"settings": {
"import/resolver": {
"node": {
"paths": ["src"],
"extensions": [".js", ".jsx", ".ts", ".tsx"]
}
}
},
"rules": {
"react/jsx-filename-extension": [1, { "extensions": [".tsx", ".ts"] }]
}
}
}
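
The new file above wraps its rules in an "eslintConfig" key, the same shape ESLint reads from a package.json, so once this block sits in the frontend's package.json the config is discovered automatically and no separate .eslintrc is needed. A short sketch of driving it through ESLint's Node API follows; the cwd and glob pattern are assumptions for illustration.

import { ESLint } from 'eslint';

async function lintFrontend(): Promise<void> {
  // ESLint walks up from cwd and picks up the nearest config,
  // including an "eslintConfig" block inside package.json.
  const eslint = new ESLint({ cwd: 'invokeai/frontend' }); // assumed path
  const results = await eslint.lintFiles(['src/**/*.{ts,tsx}']);
  const formatter = await eslint.loadFormatter('stylish');
  console.log(await formatter.format(results));
}

lintFrontend().catch(console.error);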

Some files were not shown because too many files have changed in this diff.