Compare commits

...

605 Commits

Author SHA1 Message Date
Lincoln Stein
f5d1fbd896 Update main to release v2.3.0 (#2608)
# Release 2.3.0

This will bring `main` up to date with release 2.3.0. I will need
approvals from @mauwii (docs) and @blessedcoolant (for _version.py).
2023-02-09 17:28:55 -05:00
Lincoln Stein
424cee63f1 Merge branch 'main' into release/2.3.0-last-tweaks 2023-02-09 16:36:51 -05:00
blessedcoolant
da193ecd4a ESLint EOL Fix 2023-02-10 09:11:07 +13:00
psychedelicious
56fd202e21 builds frontend 2023-02-10 08:24:40 +13:00
Jonathan
29454a2974 Update generationSlice.ts 2023-02-10 08:24:40 +13:00
Jonathan
c977d295f5 Update generationSlice.ts 2023-02-10 08:24:40 +13:00
Jonathan
28eaffa188 Update generationSlice.ts
Added perlin noise state restoration.
2023-02-10 08:24:40 +13:00
psychedelicious
3feff09fb3 fixes #2049 use threshold not setting correct value 2023-02-10 08:24:40 +13:00
Lincoln Stein
158d1ef384 bump version number; update contributors 2023-02-09 13:01:08 -05:00
Lincoln Stein
4785a1cd05 Up version to 2.3.0-rc7 (#2591)
This brings `main` up to date with 2.3.0 release candidate 7.
2023-02-08 22:06:58 -05:00
Lincoln Stein
2876c4ddec Merge branch 'main' into 2.3.0rc7 2023-02-08 21:40:14 -05:00
Lincoln Stein
4bce81de26 blank out lstein's employer info 2023-02-08 18:08:02 -05:00
Lincoln Stein
12d15a1a3f Up version to 2.3.0-rc7 2023-02-08 15:55:35 -05:00
Lincoln Stein
2aad4dab90 Initial Slider & Img2Img=1 Updates (#2467)
Adding a slider for Hi Res Fix to control Img2Img

Updated Img2img to accept values of 1 (replacing Inpaint Replace)
2023-02-08 15:50:44 -05:00
Lincoln Stein
c19107e0a8 Merge branch 'main' into Img2Img-Slider-Updates 2023-02-08 15:21:46 -05:00
Lincoln Stein
eaf29e1751 Make menu options in invoke.bat the same as options in invoke.sh (#2588)
- This makes the launcher options menu on Windows look and act the same
as the Linux/Mac launcher, which previously was lacking the command-line
help option and didn't list item (6) as an option.
2023-02-08 15:20:43 -05:00
psychedelicious
d964374a91 builds frontend 2023-02-09 07:03:58 +11:00
Kent Keirsey
9826f80d7f Initial Slider & Img2Img=1 Updates 2023-02-09 07:02:39 +11:00
Lincoln Stein
ec89bd19dc Merge branch 'main' into installer/fix-launcher-menu 2023-02-08 14:54:36 -05:00
Lincoln Stein
23aaf54f56 Documentation for 2.3.0 (#2564)
Work in progress. I am reviewing and updating the documentation for
2.3.0. The following sections need to be done:

- [x] index.md
- [x] installation/010_INSTALL_AUTOMATED.md
- [x] installation/020_INSTALL_MANUAL.md
- [x] installation/030_INSTALL_CUDA_AND_ROCM.md (needs to be written
from scratch)
- [x] installation/040_INSTALL_DOCKER.md
- [x] installation/050_INSTALLING_MODELS.md
- [x] features/CLI.md
- [x] features/WEB.md
2023-02-08 14:54:20 -05:00
Lincoln Stein
6d3cc25bca Merge branch 'main' into 2.3-documentation-fixes 2023-02-08 14:29:35 -05:00
Lincoln Stein
c9d246c4ec Update 050_INSTALLING_MODELS.md (#2576)
Using Windows 10 I found I needed to use double backslashes to import a
new model, when using single backslash the output would say
"e:_ProjectsCodemodelsldmstable-diffusion-model-to-import.ckpt is
neither the path to a .ckpt file nor a diffusers repository id. Can't
import." This added tip in the documentation will help Windows users
overcome this.
2023-02-08 14:25:36 -05:00
mauwii
74406456f2 Fix links (ignored deprecated folder) 2023-02-08 20:07:27 +01:00
Lincoln Stein
8e0cd2df18 add 2.3.0 release date 2023-02-08 14:06:53 -05:00
Lincoln Stein
4d4b1777db Merge branch 'main' into patch-1 2023-02-08 13:59:47 -05:00
Lincoln Stein
d6e5da6e37 deprecated out of date FAQ 2023-02-08 13:58:17 -05:00
Lincoln Stein
dec7d8b160 fix up the features/overview document 2023-02-08 13:52:02 -05:00
Lincoln Stein
4ecf016ace Merge branch 'main' into 2.3-documentation-fixes 2023-02-08 12:47:27 -05:00
Lincoln Stein
4d74af2363 Update docs/installation/030_INSTALL_CUDA_AND_ROCM.md
Co-authored-by: Eugene Brodsky <ebr@users.noreply.github.com>
2023-02-08 12:46:36 -05:00
Lincoln Stein
c6a2ba12e2 finished CLI, IMG2IMG and WEB updates 2023-02-08 12:45:56 -05:00
Lincoln Stein
350b5205a3 fix crash when --prompt="prompt" is used in CLI (#2579)
- The following were supposed to be equivalent, but the latter crashes:
```
invoke> banana sushi
invoke> --prompt="banana sushi"
```
This PR fixes the problem.

- Fixes #2548
2023-02-08 11:36:20 -05:00
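
A minimal sketch of this class of fix, assuming a hypothetical `normalize_command()` helper rather than the actual InvokeAI CLI code: a leading `--prompt=` switch is folded back into a bare prompt before the rest of the command is parsed.

```
import shlex

def normalize_command(command: str) -> str:
    # Treat `--prompt="banana sushi"` the same as a bare `banana sushi`
    # prompt, keeping any trailing switches (e.g. -s20) intact.
    tokens = shlex.split(command)
    if tokens and tokens[0].startswith("--prompt="):
        prompt = tokens[0][len("--prompt="):]
        return " ".join([prompt] + tokens[1:])
    return command

assert normalize_command('--prompt="banana sushi"') == "banana sushi"
```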
Lincoln Stein
06028e0131 Merge branch 'main' into bugfix/cli-crash-on-prompt-arg 2023-02-08 11:06:48 -05:00
Lincoln Stein
c6d13e679f make menu options in invoke.bat the same as options in invoke.sh
- This makes the launcher options menu on Windows look and act the same
  as the Linux/Mac launcher, which previously was lacking the command-line
  help option and didn't list item (6) as an option.
2023-02-08 11:04:00 -05:00
psychedelicious
72357266a6 fixes #2578 use prompt bug on webkit browsers 2023-02-09 02:25:57 +13:00
Lincoln Stein
9d69843a9d fix screenshot directory name 2023-02-08 07:57:46 -05:00
Lincoln Stein
0547d20b2f crop screenshots 2023-02-08 07:54:27 -05:00
Lincoln Stein
2af6b8fbd8 screenshot revision 2023-02-08 07:46:47 -05:00
psychedelicious
0cee72dba5 fixes #2525 del hotkey doesn't work after canceling
The `useHotkeys` hook for this hotkey didn't have `isConnected` or `isProcessing` in its dependencies array. This prevented `handleDelete()` from dispatching the delete request.
2023-02-09 01:37:55 +13:00
psychedelicious
77c11a42ee fixes #2505 add preserve masked to status text 2023-02-09 01:10:59 +13:00
Lincoln Stein
1d62b4210f First draft of CODEOWNERS (#2558)
This is an early draft of a codeowners file for InvokeAI. It has plenty
of gaps in it. Please use this PR to add yourself and others where
appropriate.
2023-02-08 01:13:45 -05:00
Lincoln Stein
d5a3571c00 Merge branch 'main' into dev/codeowner-assignment 2023-02-08 00:46:31 -05:00
Lincoln Stein
8b2ed9b8fd finished work on INSTALLING MODELS 2023-02-08 00:40:21 -05:00
Lincoln Stein
24792eb5da add CUDA and ROCm installation instructions 2023-02-07 23:02:45 -05:00
Lincoln Stein
614220576f add that forward slashes work too 2023-02-07 23:01:59 -05:00
Lincoln Stein
70bcbc7401 Better AMD clarification (#2536)
To better clarify that AMD is supported when using linux
2023-02-07 22:36:40 -05:00
Lincoln Stein
492605ac3e Merge branch 'main' into patch-1 2023-02-07 22:14:39 -05:00
Lincoln Stein
67f892455f fix crash when --prompt="prompt" is used in CLI
- The following were supposed to be equivalent, but the latter crashes:
```
invoke> banana sushi
invoke> --prompt="banana sushi"
```
This PR fixes the problem.

- Fixes #2548
2023-02-07 22:09:34 -05:00
Lincoln Stein
ae689d1a4a add platform-specific help instructions to installer (#2530)
This adds some platform-specific help messages to the installer welcome
screen:

- For Windows, the message encourages them to install VC++ core
libraries and the registry long name patch
- For MacOSX, the message warns the user to install the XCode tools.
2023-02-07 20:47:58 -05:00
Lincoln Stein
10990799db Merge branch 'main' into dev/codeowner-assignment 2023-02-07 20:29:38 -05:00
Lincoln Stein
c5b4397212 Merge branch 'main' into installer/platform-specific-help 2023-02-07 20:25:02 -05:00
LoganPederson
f62bbef9f7 Update 050_INSTALLING_MODELS.md
I found I needed to use double backslashes to import a new model, when using single backslash the output would say "e:_ProjectsCodemodelsldmstable-diffusion-model-to-import.ckpt is neither the path to a .ckpt file nor a diffusers repository id. Can't import." This added tip in the documentation will help Windows users overcome this.
2023-02-07 18:19:59 -06:00
mauwii
9157da8237 Began to fill the empty CUDA/ROCm doc
🤡
2023-02-08 00:05:24 +01:00
Lincoln Stein
9c2b9af3a8 Bring main up to 2.3.0-rc6 (#2563)
This bumps up the version number, and also applies a hotfix to the
configure script to fix the problem described in PR #2562
2023-02-07 18:02:13 -05:00
Lincoln Stein
e3419c82e8 Merge branch 'main' into patch-1 2023-02-07 17:45:15 -05:00
Lincoln Stein
65f3d22649 Merge branch 'main' into dev/codeowner-assignment 2023-02-07 17:44:37 -05:00
Lincoln Stein
39b0288595 Merge branch 'main' into 2.3.0rc6 2023-02-07 17:43:38 -05:00
Lincoln Stein
13d12a0ceb Merge branch 'main' into 2.3-documentation-fixes 2023-02-07 17:08:10 -05:00
Lincoln Stein
b92dc8db83 add developer install instructions 2023-02-07 17:04:01 -05:00
Lincoln Stein
b49188a39d doc updates; clean up install directory
- Large rewrite of documentation for automated and manual install.
- Reorganize installer zip file to reduce visual clutter for user.
2023-02-07 16:35:22 -05:00
Lincoln Stein
b9c8270ee6 update manual install doc 2023-02-07 14:19:55 -05:00
Jonathan
f0f3520bca Switch to using max for attention slicing in all cases for the time being. (#2569) 2023-02-07 19:28:57 +01:00
psychedelicious
3efe9899c2 build frontend 2023-02-08 01:53:34 +13:00
psychedelicious
bdbe4660fc switch to @vitejs/plugin-react-swc 2023-02-08 01:53:34 +13:00
psychedelicious
8af9432f63 remove unneeded polyfill 2023-02-08 01:53:34 +13:00
psychedelicious
668d9cdb9d update app build configuration 2023-02-08 01:53:34 +13:00
blessedcoolant
90f5811e59 build (vite-4-code-quality) 2023-02-08 01:53:34 +13:00
blessedcoolant
15d21206a3 Remove build-dev 2023-02-08 01:53:34 +13:00
blessedcoolant
b622286f17 Upgrade to Vite 4 2023-02-08 01:53:34 +13:00
blessedcoolant
176add58b2 Rebase Fix - ModelSelect 2023-02-08 01:53:34 +13:00
psychedelicious
33c5f5a9c2 builds frontend 2023-02-08 01:53:34 +13:00
psychedelicious
2b7752b72e fixes rebase issues 2023-02-08 01:53:34 +13:00
Ryan Cao
5478d2a15e feat: add copy image in share menu 2023-02-08 01:53:34 +13:00
psychedelicious
9ad76fe80c Updates code quality tooling and formats codebase
- `eslint` and `prettier` configs
- `husky` to format and lint via pre-commit hook
- `babel-plugin-transform-imports` to treeshake `lodash` and other packages if needed

Lints and formats codebase.
2023-02-08 01:53:34 +13:00
psychedelicious
d74c4009cb Reorganises internal state
`options` slice was huge and managed a mix of generation parameters and general app settings. It has been split up:

- Generation parameters are now in `generationSlice`.
- Postprocessing parameters are now in `postprocessingSlice`
- UI related things are now in `uiSlice`

There is probably more to be done, like `gallerySlice` perhaps should only manage internal gallery state, and not if the gallery is displayed.

Full-slice selectors have been made for each slice.

Other organisational tweaks.
2023-02-08 01:53:34 +13:00
Lincoln Stein
ffe0e81ec9 Support conversion of inpainting ckpt files to diffusers (#2550)
enhance model_manager support for converting inpainting ckpt files

Previously conversions of .ckpt and .safetensors files to diffusers
models were failing with channel mismatch errors. This is corrected
with this PR.

- The model_manager convert_and_import() method now accepts the path
  to the checkpoint file's configuration file, using the parameter
  `original_config_file`. For inpainting files this should be set to
  the full path to `v1-inpainting-inference.yaml`.

- If no configuration file is provided in the call, then the presence
  of an inpainting file will be inferred at the
  `ldm.ckpt_to_diffuser.convert_ckpt_to_diffuser()` level by looking
  for the string "inpaint" in the path. AUTO1111 does something
  similar to this, but it is brittle and not recommended.

- This PR also changes the model manager model_names() method to return
  the model names in case-folded sort order.
2023-02-07 07:25:30 -05:00
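
The selection logic described above, as a hedged sketch; the helper name and the fallback config paths are assumptions, not the actual model_manager code.

```
from pathlib import Path
from typing import Optional

def pick_original_config(checkpoint: Path,
                         original_config_file: Optional[Path] = None) -> Path:
    # Prefer an explicitly supplied config file; otherwise fall back to
    # the brittle path-based inference noted above.
    if original_config_file is not None:
        return original_config_file
    if "inpaint" in checkpoint.name.lower():
        return Path("configs/stable-diffusion/v1-inpainting-inference.yaml")
    return Path("configs/stable-diffusion/v1-inference.yaml")
```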
Lincoln Stein
bdf683ec41 Merge branch 'main' into enhance/convert-inpaint-models 2023-02-07 06:59:35 -05:00
Lincoln Stein
28a5424242 Update textual inversion doc with the correct CLI name. (#2560) 2023-02-07 01:22:03 -05:00
Lincoln Stein
8d418af20b Merge branch 'main' into ti-doc-update 2023-02-07 00:59:53 -05:00
Lincoln Stein
055badd611 Diffusers Samplers (#2565)
- The Diffusers sampler list is independent of the CKPT sampler list, and the
app will load the correct list based on which model you have loaded.
- Isolated the activeModelSelector since it is used in multiple places.
- Possible fix for the white screen bug that some users face. This was
happening because of a possible null in the active model list
description tag, which should hopefully now be fixed with the new
activeModelSelector.

I'll keep tabs on the last thing. Good to go.
2023-02-07 00:59:32 -05:00
blessedcoolant
944f9e98a7 build (diffusers-samplers) 2023-02-07 18:29:14 +13:00
blessedcoolant
fcffcf5602 Diffusers Samplers
Display sampler list based on the active model.
2023-02-07 18:26:06 +13:00
blessedcoolant
f121dfe120 Update model select to use new active model selector
Hopefully this also fixes the white screen error that some users face.
2023-02-07 18:25:45 +13:00
blessedcoolant
a7dd7b4298 Add activeModelSelector
Active Model details are used in multiple places. So makes sense to have a selector for it.
2023-02-07 18:25:12 +13:00
Lincoln Stein
d94780651c Merge branch 'main' into patch-1 2023-02-07 00:07:31 -05:00
Lincoln Stein
d26abd7f01 add empty CUDA/ROCM install guide 2023-02-07 00:04:56 -05:00
Lincoln Stein
7e2b122105 updated manual install instructions 2023-02-06 23:59:48 -05:00
Lincoln Stein
8a21fc1c50 bump version to 2.3.0-rc6 2023-02-06 23:36:49 -05:00
Lincoln Stein
275d5040f4 Merge branch 'bugfix/configure-script' into 2.3.0rc6 2023-02-06 23:35:32 -05:00
Lincoln Stein
1b5930dcad do not merge diffusers and ckpt stanzas 2023-02-06 23:23:07 -05:00
Lincoln Stein
d5810f6270 Bring main up to date with RC5 (#2555)
Updated the version number
2023-02-06 22:23:58 -05:00
Lincoln Stein
ebc51dc535 incomplete work on manual install 2023-02-06 21:47:29 -05:00
Lincoln Stein
ac6e9238f1 Merge branch 'main' into ti-doc-update 2023-02-06 20:06:33 -05:00
Dan Sully
6343b245ef Update textual inversion doc with the correct CLI name. 2023-02-06 14:51:22 -08:00
Lincoln Stein
8c80da2844 Merge branch 'main' into 2.3.0rc5 2023-02-06 17:38:25 -05:00
Lincoln Stein
a12189e088 fix build-container.yml (#2557)
This should fix the build-container workflow when triggered by a Tag
(that it is failing was mentioned in #2555 )
2023-02-06 15:09:04 -05:00
cosmii02
472c97e4e8 Merge branch 'main' into patch-1 2023-02-06 22:05:47 +02:00
mauwii
5baf0ae755 add mkdocs.yml and pyproject.toml
also make docs separate header
2023-02-06 20:47:20 +01:00
Lincoln Stein
a56e3014a4 Merge branch 'main' into update/ci/refine-build-container 2023-02-06 14:42:02 -05:00
Lincoln Stein
f3eff38f90 add tildebyte areas 2023-02-06 14:38:42 -05:00
Lincoln Stein
53d2d34b3d Merge branch 'main' into 2.3.0rc5 2023-02-06 14:34:16 -05:00
Lincoln Stein
ede7d1a8f7 first draft of codeowners 2023-02-06 14:33:46 -05:00
blessedcoolant
ac23a321b0 build (hires-strength-slider) 2023-02-07 08:22:39 +13:00
blessedcoolant
f52b233205 Add Hi Res Strength Slider 2023-02-07 08:22:39 +13:00
mauwii
8242fc8bad update metadata 2023-02-06 19:58:48 +01:00
Matthias Wild
09b6f7572b Merge branch 'invoke-ai:main' into main 2023-02-06 19:50:40 +01:00
Lincoln Stein
bde6e96800 Merge branch 'main' into 2.3.0rc5 2023-02-06 12:55:47 -05:00
Lincoln Stein
13474e985b Merge branch 'main' into patch-1 2023-02-06 12:54:07 -05:00
Jonathan
28b40bebbe Refactor CUDA cache clearing to add statistical reporting. (#2553) 2023-02-06 12:53:30 -05:00
Lincoln Stein
1c9fd00f98 this is likely the penultimate rc 2023-02-06 12:03:08 -05:00
Lincoln Stein
8ab66a211c force torch reinstall (#2532)
For the torch and torchvision libraries **only**, the installer will now
pass the pip `--force-reinstall` option. This is intended to fix issues
with the user getting a CPU-only version of torch and then not being
able to replace it.
2023-02-06 11:58:57 -05:00
Lincoln Stein
bc03ff8b30 Merge branch 'main' into install/force-torch-reinstall 2023-02-06 11:31:57 -05:00
blessedcoolant
0247d63511 Build (negative-prompt-box) 2023-02-07 05:21:09 +13:00
blessedcoolant
7604b36577 Add Negative Prompts Box 2023-02-07 05:21:09 +13:00
blessedcoolant
4a026bd46e Organize language picker items alphabetically 2023-02-07 05:21:09 +13:00
blessedcoolant
6241fc19e0 Fix the model manager edit placeholder not being full height 2023-02-07 05:21:09 +13:00
blessedcoolant
25d7d71dd8 Slightly decrease the size of the tab list icons 2023-02-07 05:21:09 +13:00
Jonathan
2432adb38f In exception handlers, clear the torch CUDA cache (if we're using CUDA) to free up memory for other programs using the GPU and to reduce fragmentation. (#2549) 2023-02-06 10:33:24 -05:00
Lincoln Stein
91acae30bf Merge branch 'main' into patch-1 2023-02-06 10:14:27 -05:00
Lincoln Stein
ca749b7de1 remove debugging statement 2023-02-06 09:45:21 -05:00
Lincoln Stein
7486aa8608 enhance model_manager support for converting inpainting ckpt files
Previously conversions of .ckpt and .safetensors files to diffusers
models were failing with channel mismatch errors. This is corrected
with this PR.

- The model_manager convert_and_import() method now accepts the path
  to the checkpoint file's configuration file, using the parameter
  `original_config_file`. For inpainting files this should be set to
  the full path to `v1-inpainting-inference.yaml`.

- If no configuration file is provided in the call, then the presence
  of an inpainting file will be inferred at the
  `ldm.ckpt_to_diffuser.convert_ckpt_to_diffuser()` level by looking
  for the string "inpaint" in the path. AUTO1111 does something
  similar to this, but it is brittle and not recommended.

- This PR also changes the model manager model_names() method to return
  the model names in case-folded sort order.
2023-02-06 09:35:23 -05:00
mauwii
0402766f4d add author label 2023-02-06 14:05:27 +01:00
mauwii
a9ef5d1532 update tags 2023-02-06 14:05:27 +01:00
Matthias Wild
a485d45400 Update test-invoke-pip.yml (#2524)
test-invoke-pip.yml:
- enable caching of pip dependencies in `actions/setup-python@v4`
- add workflow_dispatch trigger
- fix indentation in concurrency
- set env `PIP_USE_PEP517: '1'`
- cache python dependencies
- remove models cache (since we currently use 190.96 GB of 10 GB while I
am writing this)
- add step to set `INVOKEAI_OUTDIR`
- add outdir arg to invokeai
- fix path in archive results

model_manager.py:
- read files in chunks when calculating sha (windows runner is crashing
otherwise)
2023-02-06 12:56:15 +01:00
mauwii
a40bdef29f update model_manager.py
- read files in chunks when calculating sha
  - Windows runner is crashing without this
2023-02-06 12:30:10 +01:00
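
A minimal sketch of the chunked hashing described in this entry; the helper name and the 1 MiB chunk size are assumptions, not the literal model_manager.py code.

```
import hashlib
from pathlib import Path

def sha256_of_file(path: Path, chunk_size: int = 2**20) -> str:
    # Read the file in 1 MiB chunks instead of all at once, so large
    # checkpoints don't exhaust memory (the failure mode on the Windows
    # runner noted above).
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
```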
mauwii
fc2670b4d6 update test-invoke-pip.yml
- add workflow_dispatch trigger
- fix indentation in concurrency
- set env `PIP_USE_PEP517: '1'`
- cache python dependencies
- remove models cache (since currently 183.59 GB of 10 GB are used)
- add step to set `INVOKEAI_OUTDIR`
- add outdir arg to invokeai
- fix path in archive results
2023-02-06 12:30:10 +01:00
Eugene Brodsky
f0cd1aa736 highlight key elements of installer welcome message
- help users to avoid glossing over per-platform prerequisites
- better link colouring
- update link to community instructions to install xcode command line tools
2023-02-06 00:57:29 -05:00
Lincoln Stein
c3807b044d Merge branch 'main' into install/force-torch-reinstall 2023-02-06 00:18:38 -05:00
Jonathan
b7ab025f40 Update base.py (#2543)
Free up CUDA cache right after each image is generated. VRAM usage drops down to pre-generation levels.
2023-02-06 05:14:35 +00:00
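
The cache-clearing pattern referenced in this entry and in #2549/#2553 above, as a short sketch; the guard avoids errors on CPU-only machines.

```
import torch

def free_gpu_memory() -> None:
    # Release cached CUDA allocations back to the driver so other
    # programs can use the VRAM and fragmentation is reduced.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
```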
Lincoln Stein
633f702b39 fix crash in txt2img and img2img w/ inpainting models and perlin > 0 (#2544)
- get_perlin_noise() was returning 9 channels; fixed code to return
noise for just the 4 image channels and not the mask ones.

- Closes Issue #2541
2023-02-05 23:50:32 -05:00
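
A sketch of the fix described above. With an inpainting model the UNet input has 9 channels (4 image latents, 4 masked-image latents, 1 mask), but perlin noise must cover only the 4 image channels; `perlin_plane` here is a hypothetical callable returning one (height, width) noise plane.

```
import torch

def perlin_noise_for_latents(perlin_plane, width: int, height: int,
                             image_channels: int = 4) -> torch.Tensor:
    # Build noise only for the image latent channels, not the mask ones.
    planes = [perlin_plane(width, height) for _ in range(image_channels)]
    return torch.stack(planes)  # shape (4, height, width), not (9, ...)
```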
Lincoln Stein
3969637488 remove misleading completion message from merge_diffusers 2023-02-05 23:39:43 -05:00
Lincoln Stein
658ef829d4 tweak initial model descriptions 2023-02-05 23:23:09 -05:00
Lincoln Stein
0240656361 fix crash in txt2img and img2img w/ inpainting models and perlin > 0
- get_perlin_noise() was returning 9 channels; fixed code to return
  noise for just the 4 image channels and not the mask ones.

- Closes Issue #2541
2023-02-05 22:55:08 -05:00
Lincoln Stein
719a5de506 Merge branch 'main' into patch-1 2023-02-05 21:43:13 -05:00
Matthias Wild
05bb9e444b update pypi_helper.py (#2533)
- don't rename requests
- remove dash in version (`2.3.0-rc3` becomes `2.3.0rc3`)
- read package_name instead of hardcoding it
2023-02-06 03:34:52 +01:00
Lincoln Stein
0076757767 Merge branch 'main' into dev/ci/update-pypi-helper 2023-02-05 21:10:49 -05:00
Lincoln Stein
6ab03c4d08 fix crash in both textual_inversion and merge front ends when not enough models defined (#2540)
- The issue is that if insufficient diffusers models are defined in
models.yaml, the frontend would crash ungracefully.

- Now it emits appropriate error messages telling the user what the problem
is.
2023-02-05 19:34:07 -05:00
Lincoln Stein
142016827f fix formatting bugs in both textual_inversion and merge front ends
- The issue is that if insufficient diffusers models are defined in
  models.yaml, the frontend would crash ungracefully.

- Now it emits appropriate error messages telling the user what the problem
  is.
2023-02-05 18:35:01 -05:00
Lincoln Stein
466a82bcc2 Updates frontend README.md (#2539) 2023-02-05 17:25:25 -05:00
Lincoln Stein
05349f6cdc Merge branch 'main' into dev/ci/update-pypi-helper 2023-02-05 17:13:09 -05:00
psychedelicious
ab585aefae Update README.md 2023-02-06 09:07:44 +11:00
Matthias Wild
083ce9358b hotfix build-container.yml (#2537)
fix broken tag
2023-02-05 22:30:23 +01:00
Lincoln Stein
f56cf2400a Merge branch 'main' into install/force-torch-reinstall 2023-02-05 15:40:35 -05:00
cosmii02
5de5e659d0 Better AMD clarification
To better clarify that AMD is supported when using linux
2023-02-05 12:29:50 -08:00
mauwii
fc53f6d47c hotfix build-container.yml 2023-02-05 21:25:44 +01:00
Matthias Wild
2f70daef8f Issue/2487/address docker issues (#2517)
Address issues of #2487
2023-02-05 21:20:13 +01:00
mauwii
fc2a136eb0 add requested change 2023-02-05 21:15:39 +01:00
Lincoln Stein
ce3da40434 Merge branch 'main' into install/force-torch-reinstall 2023-02-05 15:01:56 -05:00
mauwii
7933f27a72 update pypi_helper.py
- don't rename requests
- remove dash in version (`2.3.0-rc3` becomes `2.3.0rc3`)
- read package_name instead of hardcoding it
2023-02-05 20:45:31 +01:00
mauwii
1c197c602f update Dockerfile, .dockerignore and workflow
- don't build the frontend, due to complications with QEMU
- set pip cache dir
- add pip cache to all pip-related build steps
- don't lock pip cache
- update dockerignore to exclude unneeded files
2023-02-05 20:20:50 +01:00
mauwii
90656aa7bf update Dockerfile
- add build arg `FRONTEND_DIR`
2023-02-05 20:20:50 +01:00
mauwii
394b4a771e update Dockerfile
- remove yarn install args `--prefer-offline` and `--production=false`
2023-02-05 20:20:50 +01:00
mauwii
9c3f548900 update settings output in build.sh 2023-02-05 20:20:50 +01:00
mauwii
5662d2daa8 add invokeai/frontend/dist/** to .dockerignore 2023-02-05 20:20:50 +01:00
mauwii
fc0f966ad2 fix docs 2023-02-05 20:20:50 +01:00
mauwii
eb702a5049 fix env.sh, update Dockerfile, update build.sh
env.sh:
- move check for torch to CONTAINER_FLAVOR detection

Dockerfile
- only mount `/var/cache/apt` for apt related steps
- remove `docker-clean` from `/etc/apt/apt.conf.d` for BuildKit cache
- remove apt-get clean for BuildKit cache
- only copy frontend to frontend-builder
- mount `/usr/local/share/.cache/yarn` in frontend-builder
- separate steps for yarn install and yarn build
- build pytorch in pyproject-builder

build.sh
- prepare for installation with extras
2023-02-05 20:20:50 +01:00
mauwii
1386d73302 fix env.sh
only try to auto-detect CUDA/ROCm if torch is installed
2023-02-05 20:20:50 +01:00
mauwii
6089f33e54 fix HUGGING_FACE_HUB_TOKEN 2023-02-05 20:20:50 +01:00
mauwii
3a260cf54f update directory from docker-build to docker 2023-02-05 20:20:50 +01:00
mauwii
9949a438f4 update docs with newly added variables
also remove outdated information
2023-02-05 20:20:50 +01:00
mauwii
84c1122208 fix build.sh and env.sh 2023-02-05 20:20:50 +01:00
Lincoln Stein
cc3d431928 2.3.0rc4 (#2514)
This will bring main up to date with v2.3.0-rc4
2023-02-05 14:05:15 -05:00
Lincoln Stein
c44b060a2e Merge branch 'main' into 2.3.0rc4 2023-02-05 13:40:56 -05:00
Lincoln Stein
eff7fb89d8 installer will --force-reinstall torch 2023-02-05 13:39:46 -05:00
Lincoln Stein
cd5c112fcd Allow multiple models to be imported by passing a directory. (#2529)
This change allows passing a directory with multiple models in it to be
imported.

Ensures that diffusers directories will still work.

Fixed up some minor type issues.
2023-02-05 13:36:00 -05:00
Lincoln Stein
563867fa99 Merge branch 'main' into main 2023-02-05 12:51:03 -05:00
Lincoln Stein
2e230774c2 Merge branch 'main' into 2.3.0rc4 2023-02-05 12:44:44 -05:00
Lincoln Stein
9577410be4 add platform-specific help instructions to installer 2023-02-05 12:43:13 -05:00
Lincoln Stein
4ada4c9f1f Add --log_tokenization to sysargs (#2523)
This allows the --log_tokenization option to be used as a command line
argument (or from invokeai.init), making it possible to view
tokenization information in the terminal when using the web interface.
2023-02-05 11:55:26 -05:00
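
A minimal argparse sketch of exposing such a flag; this is not InvokeAI's actual Args class, just the general shape of the change.

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--log_tokenization",
    action="store_true",
    help="show how the prompt is tokenized",
)

opts = parser.parse_args(["--log_tokenization"])
assert opts.log_tokenization
```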
blessedcoolant
9a6966924c Merge branch 'main' into main 2023-02-06 05:33:48 +13:00
Lincoln Stein
0d62525f3d reword help message slightly 2023-02-05 08:11:02 -08:00
Dan Sully
2ec864e37e Allow multiple models to be imported by passing a directory. 2023-02-05 08:11:02 -08:00
Lincoln Stein
9307ce3dc3 this fixes a crash in the TI frontend (#2527)
- This fixes an edge case crash when the textual inversion frontend
  tried to display the list of models and no default model was defined
  in models.yaml

Co-authored-by: Jonathan <34005131+JPPhoto@users.noreply.github.com>
2023-02-05 16:05:33 +00:00
Lincoln Stein
15996446e0 Merge branch 'main' into 2.3.0rc4 2023-02-05 10:54:53 -05:00
blessedcoolant
7a06c8fd89 Merge branch 'main' into main 2023-02-06 04:43:49 +13:00
Lincoln Stein
4895fe8395 fix crash when text mask applied to img2img (#2526)
This PR fixes the crash reported at https://discord.com/channels/1020123559063990373/1031668022294884392/1071782238137630800

It also quiets down the "NSFW is disabled" nag during img2img generation.
2023-02-05 15:26:40 +00:00
Lincoln Stein
1e793a2dfe Merge branch 'main' into 2.3.0rc4 2023-02-05 10:24:09 -05:00
blessedcoolant
9c8fcaaf86 Beautify & Cleanup WebUI Logs 2023-02-05 22:55:57 +13:00
blessedcoolant
bf4344be51 Beautify Usage Stats Log 2023-02-05 22:55:40 +13:00
blessedcoolant
f7532cdfd4 Beautify Token Log Outputs 2023-02-05 22:55:29 +13:00
blessedcoolant
f1dd76c20b Remove Deprecation Warning from Diffusers Pipeline 2023-02-05 22:55:10 +13:00
whosawhatsis
3016eeb6fb Merge branch 'invoke-ai:main' into main 2023-02-04 22:56:59 -05:00
whosawhatsis
75b62d6ca8 Add --log_tokenization to sysargs
This allows the --log_tokenization option to be used as a command line argument (or from invokeai.init), making it possible to view tokenization information in the terminal when using the web interface.
2023-02-04 19:56:20 -08:00
Lincoln Stein
82ae2769c8 Configuration script tidying up (#2513)
- Rename configure_invokeai.py to invokeai_configure.py to be consistent
with installed script name
- Remove warning message about half-precision models not being available
during the model download process.
- adjust estimated file size reported by configure
- guesstimate disk space needed for "all" models
- fix up the "latest" tag to be named 'v2.3-latest'
2023-02-04 21:58:56 -05:00
Lincoln Stein
61149abd2f Merge branch 'main' into lstein/normalize-names 2023-02-04 21:41:22 -05:00
Lincoln Stein
eff126af6e Merge branch 'main' into 2.3.0rc4 2023-02-04 21:40:47 -05:00
Matthias Wild
0ca499cf96 Add workflow for PyPI Release (#2516) 2023-02-05 00:31:00 +01:00
mauwii
3abf85e658 fix conditions
workflow will only run in official repo
2023-02-04 23:58:07 +01:00
mauwii
5095285854 fix pypi-release.yml 2023-02-04 23:46:10 +01:00
mauwii
93623a4449 add conditions to check for Repo and Secret 2023-02-04 23:22:23 +01:00
mauwii
0197459b02 change back to current version 2023-02-04 23:07:20 +01:00
mauwii
1578bc68cc change version to test workflow 2023-02-04 23:06:29 +01:00
mauwii
4ace397a99 remove debug steps 2023-02-04 23:05:29 +01:00
mauwii
d85a710211 rename pypi_helper.py 2023-02-04 23:00:39 +01:00
mauwii
536d534ab4 add pypi-release.yml and pypi-helper.py 2023-02-04 22:58:21 +01:00
Lincoln Stein
fc752a4e75 move old .venv directory away during install
- To ensure a clean environment, the installer will now detect whether a
  previous .venv exists in the install location, and move it to .venv-backup
  before creating a fresh .venv.

- Any previous .venv-backup is deleted.

- User is informed of process.
2023-02-04 16:14:29 -05:00
Lincoln Stein
3c06d114c3 fix name of latest tag 2023-02-04 14:04:24 -05:00
Lincoln Stein
00d79c1fe3 bump version number to rc4 2023-02-04 14:00:58 -05:00
Lincoln Stein
60213893ab configuration script tidying up
- Rename configure_invokeai.py to invokeai_configure.py to be
  consistent with installed script name
- Remove warning message about half-precision models not being
  available during the model download process.

- adjust estimated file size reported by configure

- guesstimate disk space needed for "all" models

- fix up the "latest" tag to be named 'v2.3-latest'
2023-02-04 13:55:36 -05:00
Lincoln Stein
3b58413d9f Fixes PYTORCH_ENABLE_MPS_FALLBACK not set correctly (#2508)
`torch` wasn't seeing the environment variable. I suspect this is
because it was imported before the variable was set, so was running with
a different environment.

Many `torch` ops are supported on MPS so this wasn't noticed
immediately, but some samplers like k_dpm_2 still use unsupported
operations and need this fallback.
2023-02-04 11:32:52 -05:00
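
The import-order fix, sketched; the key point, per the commit's own diagnosis, is that the variable must be exported before `torch` is imported.

```
import os

# Set the fallback *before* torch is imported, so torch sees it.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

import torch  # noqa: E402  (deliberately imported after the env var is set)
```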
Lincoln Stein
1139884493 Merge branch 'main' into fix/mps-fallback 2023-02-04 11:11:59 -05:00
Lincoln Stein
17e8f966d0 Fix registration of text masks (#2501)
- Scale and crop not applied correctly
- Problem found and fixed by @spezialspezial
- Closes #2470
2023-02-04 10:48:27 -05:00
Lincoln Stein
a42b25339f Merge branch 'main' into bugfix/txt2mask 2023-02-04 10:25:30 -05:00
Lincoln Stein
1b0731dd1a use torch-cu117 from download.pytorch.org rather than pypi (#2492)
This PR forces the installer to install the official torch-cu117 wheel
from download.pytorch.org, rather than relying on PyPI.org to return the
correct version. It ought to correct the problems that some people have
experienced with cuda support not being installed.
2023-02-04 10:04:22 -05:00
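
Roughly what forcing the official wheel looks like, sketched with subprocess; the exact package pins and pip flags the installer uses are assumptions.

```
import subprocess
import sys

# Point pip at the official CUDA 11.7 wheel index instead of relying
# on PyPI to resolve the right torch build.
subprocess.check_call([
    sys.executable, "-m", "pip", "install",
    "--extra-index-url", "https://download.pytorch.org/whl/cu117",
    "torch", "torchvision",
])
```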
Lincoln Stein
61c3886843 Merge branch 'main' into bugfix/use-cu117-wheel 2023-02-04 09:43:52 -05:00
Lincoln Stein
f76d57637e Fix bugs in merge and convert process (#2491)
1. The convert module was converting ckpt models into
StableDiffusionGeneratorPipeline objects for use in-memory, but then
when saved to disk created files that could not be merged with
StableDiffusionPipeline models. I have added a flag that selects which
pipeline class to return, so that both in-memory and disk conversions
work properly.

2. This PR also fixes an issue with `invoke.sh` not using the correct
path for the textual inversion and merge scripts.

3. Quench nags during the merge process about the safety checker being
turned off.
2023-02-04 09:40:09 -05:00
Lincoln Stein
6bf73a0cf9 Merge branch 'main' into bugfix/use-cu117-wheel 2023-02-04 09:17:45 -05:00
Lincoln Stein
5145df21d9 Merge branch 'main' into bugfix/merge-fixes 2023-02-04 09:17:01 -05:00
blessedcoolant
e96ac61cb3 Add Ukrainian Localization (#2486)
* Add Ukrainian & Update Italian

* Frontend Build (Ukrainian Localization)

* Update invokeai/frontend/dist/locales/hotkeys/ua.json

Co-authored-by: Eugene Brodsky <ebr@users.noreply.github.com>

* UA Localization Fixes

* Build (ua-fixes)

* Clean Build

* Clear Build

* Clean Build (resolving main conflicts)

* Clear Build

* Frontend Build (ua-localization-rebased)

---------

Co-authored-by: Eugene Brodsky <ebr@users.noreply.github.com>
2023-02-05 00:24:24 +13:00
blessedcoolant
0e35d829c1 Build (french-localization) 2023-02-04 23:14:25 +13:00
blessedcoolant
d08f048621 Add French Localization 2023-02-04 23:14:25 +13:00
Saifeddine ALOUI
cfd453c1c7 Added French localization 2023-02-04 23:14:25 +13:00
psychedelicious
a1b1a48fb3 Fixes PYTORCH_ENABLE_MPS_FALLBACK not set correctly
`torch` wasn't seeing the environment variable. I suspect this is because it was imported before the variable was set, so was running with a different environment.

Many `torch` ops are supported on MPS so this wasn't noticed immediately, but some samplers like k_dpm_2 still use unsupported operations and need this fallback.
2023-02-04 17:27:33 +11:00
Eugene Brodsky
b5160321bf fix finding the wheel when running from outside the installer directory
in case of calling python script instead of shell
2023-02-03 23:50:57 -05:00
Lincoln Stein
0cc2a8176e bump version 2023-02-03 23:50:57 -05:00
Lincoln Stein
9ac81c1dc4 change latest tag to v2.2.3-latest, won't conflict with 2.2.5 latest tag 2023-02-03 23:50:57 -05:00
Lincoln Stein
50191774fc this fixes an issue when the install script is called outside its directory
- Also reimplements the python-path finding logic of the older install.sh script.
2023-02-03 23:50:57 -05:00
Eugene Brodsky
fcd9b813e3 Merge branch 'main' into bugfix/use-cu117-wheel 2023-02-03 23:13:22 -05:00
Lincoln Stein
813f92a1ae do not install the "update" script
- The update script doesn't work yet, so we shouldn't install it.
- For now, users update by re-running the installer.
2023-02-03 20:26:10 -05:00
Lincoln Stein
0d141c1d84 small f-string syntax fix in generate.py (#2483)
Probably low priority, but helps the error message be more clear by
hopefully displaying model_name.
2023-02-03 18:29:50 -05:00
Lincoln Stein
2e3cd03b27 Merge branch 'main' into bugfix/use-cu117-wheel 2023-02-03 18:15:54 -05:00
Lincoln Stein
4500c8b244 Merge branch 'main' into patch-2 2023-02-03 18:03:29 -05:00
Lincoln Stein
d569c9dec6 remove dead code 2023-02-03 17:35:35 -05:00
Matthias Wild
01a2b8c05b Adapt latest changes to Dockerfile (#2478)
* remove non maintained Dockerfile

* adapt Docker related files to latest changes
- also build the frontend when building the image
- skip user response if INVOKE_MODEL_RECONFIGURE is set
- split INVOKE_MODEL_RECONFIGURE to support more than one argument

* rename `docker-build` dir to `docker`

* update build-container.yml
- rename image to invokeai
- add cpu flavor
- add metadata to build summary
- enable caching
- remove build-cloud-img.yml

* fix yarn cache path, link copyjob
2023-02-03 22:34:47 +00:00
Lincoln Stein
b23664c794 registration of mask images was off due to typo
- Problem found and fixed by @spezialspezial
- Closes #2470
2023-02-03 17:32:35 -05:00
Lincoln Stein
f06fefcacc Merge branch 'main' into patch-2 2023-02-03 17:15:29 -05:00
Lincoln Stein
7fa3a499bb fix crash on Windows 10 when configure script given no HF token
Crashes would occur in the invokeai-configure script if no HF token
was found in cache and the user declined to provide one when prompted.
The reason appears to be that on Linux systems getpass_asterisk()
raises an EOFError when no input is provided.

On Windows 10, getpass_asterisk() does not raise the EOFError, but
returns an empty string instead. This patch detects this and raises
the exception so that the control logic is preserved.
2023-02-03 16:06:49 -05:00
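
A sketch of the normalization described above; `getpass_fn` stands in for getpass_asterisk(), since the exact call site is not shown here.

```
def prompt_for_token(getpass_fn) -> str:
    # On Linux, getpass_asterisk() raises EOFError when no input is
    # given; on Windows 10 it returns "" instead. Normalizing the empty
    # string to an EOFError keeps the control logic identical on both.
    token = getpass_fn("HuggingFace token: ")
    if not token:
        raise EOFError("no token provided")
    return token
```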
Lincoln Stein
c50b64ec1d correct default menu entry in install.bat file 2023-02-03 13:30:21 -05:00
Lincoln Stein
76b0bdb6f9 Fix: upgrade fails if existing venv was created with symlinks (#2489)
if reinstalling over an existing installation where the .venv was
created with symlinks to system python instead of copies of the python
executable, the installer would raise a `SameFileError`, because it
would attempt to copy Python over itself. This fixes the issue.

Copying the executable is still preferred for new environments, because
this guarantees the stable Python version.
2023-02-03 13:29:20 -05:00
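
One way to guard against that failure, sketched under the assumption of a simple copy helper (not the installer's literal code):

```
import os
import shutil

def copy_python_into_venv(src: str, dest: str) -> None:
    # If dest already exists and is the same file as src (a symlinked
    # .venv from a previous install), copy2 would raise
    # shutil.SameFileError -- skip the copy instead.
    if os.path.exists(dest) and os.path.samefile(src, dest):
        return
    shutil.copy2(src, dest)
```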
Lincoln Stein
b0ad109886 Merge branch 'main' into fix-samefile 2023-02-03 13:02:01 -05:00
Lincoln Stein
66b312c353 enhance console gui for invokeai-merge (#2480)
- Added modest adaptive behavior; if the screen is wide enough the three
checklists of models will be arranged in a horizontal row.
- Added color support
# What it looks like
On a wide window:

![image](https://user-images.githubusercontent.com/111189/216495149-0ceed761-b829-4b21-8e90-0b7faf2c7b72.png)
On a narrow window:

![image](https://user-images.githubusercontent.com/111189/216495239-1d6615cf-0e7e-44fe-83d7-513819635d8a.png)
2023-02-03 13:00:16 -05:00
Lincoln Stein
fc857f9d91 Merge branch 'main' into lstein/enhance-merge-models-gui 2023-02-03 12:36:23 -05:00
Lincoln Stein
d6bd0cbf61 Bugfixes for path finding during manual install (#2490)
- fixes bug in finding the source of the configs dir;
- updates the docs for manual install to clarify the preference to
keeping the `.venv` inside the runtime dir, and the caveat/extra steps
required if done otherwise
2023-02-03 11:02:47 -05:00
Lincoln Stein
a32f6e9ea7 use torch-cu117 from download.pytorch.org rather than pypi 2023-02-03 10:57:15 -05:00
Lincoln Stein
b41342a779 Merge branch 'main' into bugfix/config-manual-install 2023-02-03 10:28:18 -05:00
Lincoln Stein
7603c8982c feat: add copy image in share menu (#2484)
<img width="233" alt="Screenshot 2023-02-03 at 12 11 46"
src="https://user-images.githubusercontent.com/70191398/216510761-3e5013a3-5346-45d4-92e5-d913d035f1bc.png">
2023-02-03 10:27:54 -05:00
Lincoln Stein
d351e365d6 Merge branch 'main' into lstein/enhance-merge-models-gui 2023-02-03 10:27:32 -05:00
Lincoln Stein
d453afbf6b Merge branch 'main' into fix-samefile 2023-02-03 10:27:03 -05:00
Lincoln Stein
9ae55c91cc quench safety checker warnings from diffusers 2023-02-03 10:14:51 -05:00
Lincoln Stein
9e46badc40 convert no longer creates StableDiffusionGenerator pipelines unless asked to 2023-02-03 10:04:32 -05:00
Lincoln Stein
ca0f3ec0e4 fix launcher shell script to use correct names for ti and merge functions 2023-02-03 09:45:57 -05:00
Eugene Brodsky
4b9be6113d (docs) remove an obsolete symlink to a documentation file 2023-02-03 09:01:54 -05:00
Eugene Brodsky
31964c7c4c (docs) remove an obsolete manual install doc 2023-02-03 09:01:30 -05:00
Eugene Brodsky
64f9fbda2f (docs) update manual install documentation 2023-02-03 08:51:46 -05:00
psychedelicious
3ece2f19f0 Merge branch 'main' into share-copy-image 2023-02-04 00:46:48 +11:00
Eugene Brodsky
c38b0b906d (config) fix invokeai-configure path handling after manual install 2023-02-03 08:06:27 -05:00
Lincoln Stein
c79678a643 prevent crash when no default model defined 2023-02-03 02:27:50 -05:00
Lincoln Stein
2217998010 remove the environments-and-requirements directory 2023-02-03 01:49:30 -05:00
Eugene Brodsky
3b43f3a5a1 (installer) fix failure to create venv over an existing venv
if reinstalling over an existing installation where the .venv
was created with symlinks to system python instead of copies
of the python executable, the installer would raise a
SameFileError, because it would attempt to copy Python over
itself. This fixes the issue.
2023-02-03 00:36:28 -05:00
Lincoln Stein
3f193d2b97 attempted correction of white screen issue 2023-02-02 23:47:55 -05:00
Ryan Cao
9fe660c515 feat: add copy image in share menu 2023-02-03 12:10:33 +08:00
gogurtenjoyer
16356d5225 small f-string fix in generate.py
Probably low priority, but helps the error message be more clear by hopefully displaying model_name.
2023-02-02 19:33:17 -08:00
Lincoln Stein
e04cb70c7c rebuild front end 2023-02-02 21:55:01 -05:00
Lincoln Stein
ddd5137cc6 Update version 2023-02-02 21:17:53 -05:00
Lincoln Stein
b9aef33ae8 enhance console gui for invokeai-merge
- Added modest adaptive behavior; if the screen is wide enough the three
  checklists of models will be arranged in a horizontal row.
- Added color support
2023-02-02 20:26:45 -05:00
gogurtenjoyer
797e2f780d Add python version warning from the docs
Just a quick update about Python 3.11.
2023-02-02 19:28:49 -05:00
Lincoln Stein
0642728484 remove requirements step from install manual (#2442)
removing the step to link the requirements file from the docs for manual
installation, after commenting about it in #2431
2023-02-02 16:50:29 -05:00
Lincoln Stein
fe9b4f4a3c Merge branch 'main' into update/docs/remove-requirements-step 2023-02-02 16:14:45 -05:00
Lincoln Stein
756e50f641 Installer rewrite in Python (#2448)
## Summary

This PR rewrites the core of the installer in Python for cross-platform
compatibility. Filesystem path manipulation, platform/arch decisions and
various edge cases are handled in a more convenient fashion. The
original `install.bat.in`/`install.sh.in` scripts are kept as
entrypoints for their respective OSs, but only serve as thin wrappers to
the Python module.

In addition, it:

- builds and **packages the .whl with the installer**, so that
downloading a versioned installer will guarantee installation of the
same version of the application.
- updates shell entrypoints: 
- new commands are `invokeai`, `invokeai-configure`, `invokeai-ti`,
`invokeai-merge`.
- these commands will be available in the activated `.venv` or via the
launch scripts
- `invoke.py` and `configure_invokeai.py` scripts are deprecated but
kept around for backwards compatibility and keeping users' surprise to a
minimum.
- introduces a new `ldm/invoke/config` package and moves the
`configure_invokeai` script into it. Similarly, moves the Textual Inversion
script and TUI to `ldm/invoke/training`.
- moves the `configs` directory into the `ldm/invoke/config` package for
easy distribution.
- updates documentation to reflect all of the above changes
- fixes a failing test
- reduces wheel size to 3MB (from 27MB) by excluding unnecessary image
files under `assets`

⚠️ self-updating functionality and ability to install arbitrary
versions are still WIP. For now we can recommend downloading and running
the installer for a specific version as desired.

## Testing the source install

From the cloned source, check out this branch, and:

`$ python3 installer/main.py --root <path_to_destination>`

Also try:

`$ python3 installer/main.py ` - will prompt for paths
`$ python3 installer/main.py --yes` - will not prompt for any input

- try to combine the `--yes` and `--root` options
- try to install in destinations with "quirky" paths, such as paths
containing spaces in the directory name, etc.

## Testing the packaged install ("Automated Installer"):

Download the
[InvokeAI-installer-v2.3.0+a0.zip](https://github.com/invoke-ai/InvokeAI/files/10533913/InvokeAI-installer-v2.3.0%2Ba0.zip)
file, unzip it, and run the install script for your platform (preferably
in a terminal window)

OR make your own: from the cloned source, check out this branch, and:

```
cd installer
./create_installer.sh
# (do NOT tag/push when prompted! just say "no")
```

This will create the installation media:
`InvokeAI-installer-v2.3.0+a0.zip`. The installer is now
*platform-agnostic* - meaning, both Windows and *nix install resources
are packaged together.

Copy it somewhere as if it had been downloaded from the internet. Unzip
the file, enter the created `InvokeAI-Installer` directory, and run
`install.sh` or `install.bat` as applicable to your platform.

⚠️ NOTE!!! `install.sh` accepts the same arguments as are
applicable to the Python script, i.e. you can `install.sh --yes --root
....`. This is NOT yet supported by the Windows `.bat` script. Only
interactive installation is supported on Windows. (this is still a
TODO).
2023-02-02 16:08:10 -05:00
Lincoln Stein
2202288eb2 Merge branch 'main' into dev/installer 2023-02-02 15:17:40 -05:00
Lincoln Stein
fc3378bb74 Load legacy ckpt files as diffusers models (#2468)
* refactor ckpt_to_diffuser to allow converted pipeline to remain in memory

- This idea was introduced by Damian
- Note that although I attempted to use the updated HuggingFace module
  pipelines/stable_diffusion/convert_from_ckpt.py, it was unable to
  convert safetensors files for reasons I didn't dig into.
- Default is to extract EMA weights.

* add --ckpt_convert option to load legacy ckpt files as diffusers models

- not quite working - I'm getting artifacts and glitches in the
  converted diffuser models
- leave as draft for time being

* do not include safety checker in converted files

* add ability to control which vae is used

API now allows the caller to pass an external VAE model to the
checkpoint conversion process. In this way, if an external VAE is
specified in the checkpoint's config stanza, this VAE will be used
when constructing the diffusers model.

Tested with both regular and inpainting 1.X models.

Not tested with SD 2.X models!

---------

Co-authored-by: Jonathan <34005131+JPPhoto@users.noreply.github.com>
Co-authored-by: Damian Stewart <null@damianstewart.com>
2023-02-02 20:15:44 +00:00
Lincoln Stein
96228507d2 Merge branch 'main' into dev/installer 2023-02-02 14:30:35 -05:00
Lincoln Stein
1fe5ec32f5 Swap codeowners for installer (#2477)
This PR changes the codeowner for the installer directory from
@tildebyte to @ebr due to the former's time commitments.

Further reorganization of the codeowners is pending.
2023-02-02 14:27:31 -05:00
Lincoln Stein
6dee9051a1 swap codeowners for installer 2023-02-02 13:54:53 -05:00
Lincoln Stein
d58574ca46 Merge branch 'main' into dev/installer 2023-02-02 13:53:11 -05:00
Lincoln Stein
d282000c05 swap tildebyte to ebr as code owner 2023-02-02 13:52:45 -05:00
Kevin Turner
80c5322ccc fix(img2img): do not attempt to do a zero-step img2img when strength is low (#2472) 2023-02-02 10:04:09 -08:00
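
A sketch of the guard this entry describes: img2img runs roughly steps × strength denoising steps, so a very low strength can round down to zero; clamping to at least one step avoids attempting a zero-step run. The exact formula in the fix may differ.

```
def img2img_step_count(steps: int, strength: float) -> int:
    # e.g. steps=50, strength=0.01 -> int(0.5) == 0 without the clamp
    return max(1, int(steps * strength))
```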
Kevin Turner
da181ce64e Merge branch 'main' into fix/img2img-low-strength 2023-02-02 09:40:16 -08:00
Kevin Turner
5ef66ca237 Fix typo in xformers version, 0.16 -> 0.0.16 (#2475) 2023-02-02 09:39:08 -08:00
Lincoln Stein
e99e720474 resolve conflicts with main and rebuild frontend 2023-02-02 11:00:33 -05:00
Kevin Turner
7aa331af8c Merge branch 'main' into fix/img2img-low-strength 2023-02-02 07:20:34 -08:00
noodlebox
9e943ff7dc Fix typo in xformers version, 0.16 -> 0.0.16 2023-02-02 05:26:15 -05:00
Kent Keirsey
b5040ba8d0 Build 2023-02-02 22:52:03 +13:00
Kent Keirsey
07462d1d99 Remove Inpaint Replace 2023-02-02 22:52:03 +13:00
Eugene Brodsky
d273fba42c (installer) upgrade pip in python3.9 environments 2023-02-02 01:30:47 -05:00
Eugene Brodsky
735545dca1 (installer) remove pip from bootstrap venv requirements as it was breaking bootstrapping 2023-02-02 01:18:02 -05:00
Eugene Brodsky
328f87559b (installer) remove leftover debug logs; fix typo 2023-02-02 01:03:51 -05:00
Eugene Brodsky
6f10b06a0c (installer) clarify user messaging during destination directory selection 2023-02-02 01:03:51 -05:00
Eugene Brodsky
fd60c8297d (package) provide more legacy aliases to entrypoints to minimize user surprise 2023-02-02 01:03:51 -05:00
Lincoln Stein
480064fa06 pip won't install itself without --upgrade 2023-02-02 00:48:53 -05:00
Lincoln Stein
3810d6a4ce numerous tweaks
1. only load triton on linux machines
2. require pip >= 23.0 so that editable installs can run without setup.py
3. model files default to SD-1.5, not 2.1
4. use diffusers model of inpainting rather than ckpt
5. selected a new set of initial models based on # of likes at huggingface
2023-02-02 00:28:38 -05:00
Kevin Turner
44d36a0e0b fix(img2img): do not attempt to do a zero-step img2img when strength is low 2023-02-01 18:42:54 -08:00
Lincoln Stein
3996ee843c fix bugs in launcher script installation
- launcher scripts are installed *before* the configure script runs,
  so that if something goes wrong in the configure script, the user
  can run invoke.{sh,bat} and get the option to re-run configure
- fixed typo in invoke.sh which misspelled name of invokeai-configure
2023-02-01 19:14:07 -05:00
Lincoln Stein
6d966313b9 add a --find-links argument to import custom wheels 2023-02-01 19:03:15 -05:00
Lincoln Stein
8ce9f07223 Merge branch 'main' into dev/installer 2023-02-01 17:50:22 -05:00
Lincoln Stein
11ac50a6ea install xformers and triton when CUDA torch requested 2023-02-01 17:41:38 -05:00
Matthias Wild
31146eb797 add workflow to clean caches after PR gets closed (#2450)
This helps at least a bit to get rid of all those huge caches
2023-02-01 19:06:07 +01:00
mauwii
99cd598334 add workflow to clean caches after PR gets closed 2023-02-01 18:32:29 +01:00
Lincoln Stein
5441be8169 requirements: add xformers for CUDA platforms (#2465)
[xformers
0.0.16](https://github.com/facebookresearch/xformers/releases/tag/v0.0.16)
was released earlier today, and is now installable from wheels on PyPI!

Fixes #1876.
2023-02-01 10:59:13 -05:00
Lincoln Stein
3e98b50b62 Merge branch 'main' into req-xformers 2023-02-01 10:29:49 -05:00
Lincoln Stein
5f16148dea Prevent actions from running on draft PRs (#2457)
Draft PRs are triggering actions on every commit (except
`test-invoke-pip.yml`).

I've added a conditional to each job to only run when the PR is not a
draft.

(maybe there is a reason we are running all applicable workflows on
draft PRs?)
2023-02-01 00:33:15 -05:00
Lincoln Stein
9628d45a92 Merge branch 'main' into build/no-actions-on-draft 2023-02-01 00:15:30 -05:00
Eugene Brodsky
6cbdd88fe2 (installer) correctly call invokeai entrypoints in .bat launch script 2023-02-01 00:08:18 -05:00
Eugene Brodsky
d423db4f82 (meta) add copyright statements for installer code 2023-01-31 23:47:36 -05:00
Eugene Brodsky
5c8c204a1b (installer) fix regression in directory selection 2023-01-31 23:47:36 -05:00
Eugene Brodsky
a03471c588 (installer) hide system and user site packages from the installer 2023-01-31 23:47:36 -05:00
Kevin Turner
6608343455 [enhancement] Print status message at startup when xformers is available (#2461) 2023-01-31 19:11:17 -08:00
Kevin Turner
abd972f099 Merge branch 'main' into feat/xformers-startup-message 2023-01-31 18:48:09 -08:00
Lincoln Stein
bd57793a65 fix img2img by working around pytorch bug (#2458)
horribly, temporarily send the vae to `.cpu()` so that good latents can
be produced

closes #2418
2023-01-31 21:46:05 -05:00
Kevin Turner
8cdc65effc Merge branch 'main' into fix_2418_simplified 2023-01-31 17:45:54 -08:00
Kevin Turner
85b553c567 requirements: add xformers for CUDA platforms
Now available from pip!
2023-01-31 16:51:43 -08:00
Matthias Wild
af74a2d1f4 fix broken Dockerfile (#2445)
also switch to `python:3.9-slim` since it has a ton fewer security issues
2023-02-01 01:47:25 +01:00
mauwii
6fdc9ac224 re-enable INVOKE_MODEL_RECONFIGURE 2023-02-01 01:21:07 +01:00
mauwii
8107d354d9 fix broken Dockerfile
also switch to `python:3.9-slim` since it has a ton fewer security issues
2023-02-01 01:21:07 +01:00
mauwii
7ca8abb206 integrate required changes
- also remove conda related things
- rename `invoke` to `invokeai`
- rename `configure_invokeai` to `invokeai-configure`
- rename venv back to common `.venv` but add `--prompt InvokeAI`
- remove outdated information
2023-02-01 01:17:24 +01:00
Lincoln Stein
28c17613c4 feat(inpaint): add solid infill for use with inpainting model (#2441)
A new infill method, **solid**: solid color, currently using middle gray.

Fixes #2417

It seems like the runwayml inpainting model specifically expects those
masked areas to be blanked out like this.

I haven't tried the SD 2.0 inpainting model with it yet.
2023-01-31 18:27:48 -05:00
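
A minimal PIL sketch of a solid infill, assuming an RGBA input whose alpha channel marks the known region; the exact gray value and helper name are assumptions.

```
from PIL import Image

def infill_solid(image: Image.Image, color=(127, 127, 127)) -> Image.Image:
    # Fill the transparent (to-be-inpainted) area with flat middle gray,
    # which the runwayml inpainting model appears to expect.
    infill = Image.new("RGBA", image.size, color + (255,))
    infill.paste(image, mask=image.split()[-1])  # keep opaque pixels
    return infill
```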
mauwii
eeb7a4c28c (ci) disable py3.9, lin-cuda-11_6 and win cuda 2023-02-01 00:24:56 +01:00
mauwii
0009d82a92 update test_path.py to also verify caution.png 2023-02-01 00:22:28 +01:00
Lincoln Stein
e6d52d7ce6 Merge branch 'main' into fix_2418_simplified 2023-01-31 18:11:56 -05:00
Lincoln Stein
8c726d3e3e Merge branch 'main' into build/no-actions-on-draft 2023-01-31 18:08:52 -05:00
Lincoln Stein
56e2d22b6e Merge branch 'main' into feat/solid-infill 2023-01-31 18:02:17 -05:00
Lincoln Stein
053d11fe30 fix(inpainting model): blank areas to be repainted in the masked image (#2447)
Otherwise the model seems too reluctant to change these areas, even
though the mask channel should allow it to.

This makes the solid infill method proposed by #2441 less necessary,
though I think there's still a place for an infill method that is faster
than patchmatch and more predictable than tiles.

Even with #2441, this PR is still useful because it influences all areas
to be painted, not just the infill area.

Fixes #2417
2023-01-31 18:01:33 -05:00
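
The blanking step, sketched with assumed conventions: `image` is a (3, H, W) tensor in [-1, 1] and `mask` is a (1, H, W) tensor with 1 where repainting is wanted, which matches the usual diffusers inpainting setup but is not quoted from this PR.

```
import torch

def make_masked_image(image: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Zero out the to-be-repainted region in the image fed to the model,
    # so the mask channel actually frees the model to change it.
    return image * (mask < 0.5)
```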
Lincoln Stein
0066187651 Merge branch 'main' into feat/solid-infill 2023-01-31 17:53:09 -05:00
Lincoln Stein
d3d24fa816 fill color is parameterized 2023-01-31 17:52:33 -05:00
Kevin Turner
4d58fed6b0 Merge branch 'main' into fix/inpainting-blank-slate 2023-01-31 11:04:56 -08:00
Kevin Turner
bde5874707 fix dimension errors when inpainting model is used with hires-fix (#2440) 2023-01-31 11:04:02 -08:00
Kevin Turner
eed802f5d9 Merge branch 'main' into fix/hires_inpaint 2023-01-31 09:34:29 -08:00
Lincoln Stein
c13e11a264 Merge branch 'dev/installer' of github.com:invoke-ai/InvokeAI into dev/installer 2023-01-31 12:26:19 -05:00
Lincoln Stein
1c377b7995 further improvements to ability to find location of data files
- implement the following pattern for finding data files under both
  regular and editable install conditions:

  import invokeai.foo.bar as bar
  path = bar.__path__[0]

- this *seems* to work reliably with Python 3.9. Testing on 3.10 needs
  to be performed.
2023-01-31 12:24:55 -05:00
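
The pattern from this commit message as a runnable sketch; `invokeai.configs` is an assumed package name for illustration.

```
from pathlib import Path

import invokeai.configs as configs  # works for regular and editable installs

CONFIGS_DIR = Path(configs.__path__[0])
print(CONFIGS_DIR)
```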
mauwii
efe8dcaae9 cleanup test_path.py, enable pytest in pipeline
temporarily enable 3.9 tests as well
2023-01-31 18:18:32 +01:00
Lincoln Stein
fc8e3dbcd3 fix crash when editing name of model
- fixes a spurious "unknown model name" error when trying to edit the
  short name of an existing model.
- relaxes naming requirements to include the ':' and '/' characters
  in model names
2023-01-31 09:59:58 -05:00
mauwii
ec1e83e912 add pytest to test path of frontend and configs 2023-01-31 09:06:06 +01:00
mauwii
ab9daf1241 remove frontend from configure_invokeai.py
since it does not get accessed there at all
2023-01-31 08:15:48 +01:00
mauwii
c061c1b1b6 fix frontend path
point to package's path instead of searching for it
2023-01-31 08:15:20 +01:00
Lincoln Stein
b9cc56593e print status message at startup when xformers is available 2023-01-30 22:01:06 -05:00
psychedelicious
6a0e1c8673 Merge branch 'main' into build/no-actions-on-draft 2023-01-31 12:00:38 +11:00
Kevin Turner
371edc993a Implement .swap() against diffusers 0.12 (#2385) 2023-01-30 15:56:24 -08:00
Lincoln Stein
d71734c90d update frontend path in lint test 2023-01-30 18:48:43 -05:00
Lincoln Stein
9ad4c03277 Various fixes
1) Downgrade numpy to avoid dependency conflict with numba
2) Move all non-ldm/invoke files into `invokeai`. This includes assets, backend, frontend, and configs.
3) Fix up way that the backend finds the frontend and the generator finds the NSFW caution.png icon.
2023-01-30 18:42:17 -05:00
Damian Stewart
5299324321 workaround for pytorch bug, fixes #2418 2023-01-30 18:45:53 +01:00
Damian Stewart
817e36f8bf Merge branch 'diffusers_cross_attention_control_reimplementation' of github.com:damian0815/InvokeAI into diffusers_cross_attention_control_reimplementation 2023-01-30 16:23:52 +01:00
Damian Stewart
d044d4c577 rename override/restore methods to better reflect what they actually do 2023-01-30 16:23:44 +01:00
Damian Stewart
3f1120e6f2 Merge branch 'main' into diffusers_cross_attention_control_reimplementation 2023-01-30 16:17:25 +01:00
Damian Stewart
17d73d09c0 Revert "with diffusers cac, always run the original prompt on the first step"
This reverts commit 27ee939e4b.
2023-01-30 15:38:03 +01:00
Damian Stewart
478c379534 for cac make t_start=0.1 the default 2023-01-30 15:30:01 +01:00
Damian Stewart
c5c160a788 Merge branch 'diffusers_cross_attention_control_reimplementation' of github.com:damian0815/InvokeAI into diffusers_cross_attention_control_reimplementation 2023-01-30 14:51:06 +01:00
Damian Stewart
27ee939e4b with diffusers cac, always run the original prompt on the first step 2023-01-30 14:50:57 +01:00
psychedelicious
c222cf7e64 Prevents actions from running on draft PRs 2023-01-30 22:28:05 +11:00
Eugene Brodsky
b2a3b8bbf6 (installer) fix the create_installer.sh script so it instructs the user to deactivate an active venv 2023-01-30 03:42:27 -05:00
Eugene Brodsky
11cb03f7de (installer) fall back to attempted source install if wheel not found
if running `python3 installer/main.py` from the source distribution,
it would fail because it expected to find a wheel.

this PR tries to perform a source install by going one level up the directory
tree and checking for `pyproject.toml` and `ldm` directory entries to
confirm (to a degree) that this is an InvokeAI distribution
2023-01-30 03:29:09 -05:00
Eugene Brodsky
6b1dc34523 (installer) improve selection of destination directory 2023-01-30 03:15:05 -05:00
Eugene Brodsky
44786b0496 (installer) improve function naming 2023-01-29 23:39:14 -05:00
Lincoln Stein
d9ed0f6005 fix documentation of huggingface cache location (#2430)
* fix documentation of huggingface cache location

---------

Co-authored-by: Jonathan <34005131+JPPhoto@users.noreply.github.com>
2023-01-29 20:30:50 -06:00
Eugene Brodsky
2e7a002308 (installer) remove unnecessary shell options from the install wrapper script 2023-01-29 20:10:51 -05:00
Jonathan
5ce62e00c9 Merge branch 'main' into diffusers_cross_attention_control_reimplementation 2023-01-29 13:52:01 -06:00
Kevin Turner
5a8c28de97 Merge remote-tracking branch 'origin/main' into fix/hires_inpaint 2023-01-29 10:51:59 -08:00
Jonathan
07e03b31b7 Update --hires_fix (#2414)
* Update --hires_fix

Change `--hires_fix` to calculate initial width and height based on the model's resolution (if available) and with a minimum size.
2023-01-29 12:27:01 -06:00
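A rough sketch of the sizing idea described above (the scaling rule and the 64-pixel snapping are assumptions for illustration, not the PR's exact arithmetic):

```
def first_pass_size(width: int, height: int, native: int = 512) -> tuple[int, int]:
    """Shrink the first txt2img pass toward the model's native resolution,
    preserving aspect ratio and enforcing a minimum size."""
    scale = min(1.0, native / max(width, height))

    def snap(v: int) -> int:
        # snap to the 64-pixel grid the UNet expects, with a floor of 64
        return max(64, int(v * scale) // 64 * 64)

    return snap(width), snap(height)

print(first_pass_size(1536, 1024))  # -> (512, 320)
```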
Eugene Brodsky
5ee5c5a012 (training) correctly import TI module; fix type annotation 2023-01-28 19:09:16 -05:00
Eugene Brodsky
3075c99ed2 (ci) fix test that was failing due to CLI entrypoint change 2023-01-28 19:03:48 -05:00
Eugene Brodsky
2c0bee2a6d (config) ensure the correct 'invokeai' command is displayed to the user after configuration 2023-01-28 17:39:33 -05:00
Eugene Brodsky
8f86aa7ded (docs) update install docs to refer to the platform-agnostic installer 2023-01-28 17:39:33 -05:00
Eugene Brodsky
34e0d7aaa8 (config) rename all mentions of scripts/configure_invokeai.py to the new invokeai-configure command 2023-01-28 17:39:33 -05:00
Eugene Brodsky
abe4e1ea91 (scripts) improved script entrypoints 2023-01-28 17:39:33 -05:00
Eugene Brodsky
f1f8ce604a (installer) build .whl and distribute together with the installer; install from bundled .whl by default 2023-01-28 17:39:33 -05:00
Eugene Brodsky
47dbe7bc0d (assets) move 'caution.png' to avoid including entire 'assets' dir in the wheel
reduces wheel size to 3MB from 27MB
2023-01-28 17:39:33 -05:00
Eugene Brodsky
ebe6daac56 (installer) do not install if already in a venv 2023-01-28 17:39:33 -05:00
Eugene Brodsky
d209dab881 (installer) support both pip and source install; no longer support installing from a downloaded release .zip 2023-01-28 17:39:33 -05:00
Eugene Brodsky
2ff47cdecf (scripts) rename/reorganize CLI scripts
- add torch MPS fallback directly to CLI.py
- rename CLI scripts with `invoke-...` prefix
- delete long-deprecated scripts
- add a missing package dependency
- delete setup.py as obsolete
2023-01-28 17:39:33 -05:00
Eugene Brodsky
22c34aabfe (package) move TI scripts into a module; update packaging of 'configs' dir 2023-01-28 17:39:33 -05:00
Eugene Brodsky
b58a80109b (test) tweak pytest coverage options
- remove redundant options (unchanged from defaults)
- don't test 3rd party code
- omit fully covered files from coverage report
- gitignore junit (xml) test output directory
2023-01-28 17:39:33 -05:00
Eugene Brodsky
c5a9e70e7f (parser) fix missing argument default in parse_legacy_blend 2023-01-28 17:39:33 -05:00
Eugene Brodsky
c5914ce236 (installer) new torch index urls + support installation from PyPi 2023-01-28 17:39:33 -05:00
Eugene Brodsky
242abac12d (installer) add a --y[es[to_all]] argument for a fully hands-off install/config 2023-01-28 17:39:33 -05:00
Eugene Brodsky
4b659982b7 (installer) install.bat wrapper for the python script 2023-01-28 17:39:33 -05:00
Eugene Brodsky
71733bcfa1 (installer) copy launch/update scripts to the root dir; improve launch experience on Linux/Mac
- install.sh is now a thin wrapper around the pythonized install script
- install.bat not done yet - to follow
- user messaging is tailored to the current platform (paste shortcuts, file paths, etc)
- emit invoke.sh/invoke.bat scripts to the runtime dir
- improve launch scripts (add help option, etc)
- only emit the platform-specific scripts
2023-01-28 17:39:33 -05:00
Eugene Brodsky
d047e070b8 (config) fix config file creation in edge cases
if the config directory is missing, initialize it using the standard
process of copying it over, instead of failing to create the config file

this can happen if the user is re-running the config script in a directory which
already has the init file, but no configs dir
2023-01-28 17:39:33 -05:00
Eugene Brodsky
02c530e200 (installer) work around Windows install issues 2023-01-28 17:39:33 -05:00
Eugene Brodsky
d36bbb817c (installer) use pep517 for installing dependencies
the 'setup.py install' method is deprecated in favour of a
build-system independent format: https://peps.python.org/pep-0517/

this is needed to install dependencies that don't have a pyproject.toml
file (only setup.py) in a forward-compatible way
2023-01-28 17:39:33 -05:00
Eugene Brodsky
9997fde144 (config) moving the 'configs' dir into the 'config' module
This allows reliable distribution of the initial 'configs' directory
with the Python package, and enables the configuration script to be run
from anywhere, as long as the virtual environment is available on sys.path
2023-01-28 17:39:33 -05:00
Eugene Brodsky
9e22ed5c12 (installer) ignore temporary venv cleanup errors on Windows
There is a race condition affecting the 'tempfile' module on Windows.
A PermissionError is raised when cleaning up the temp dir.
Python 3.10 introduced a flag to suppress this error.

Windows + Python 3.9 users will receive an unpleasant stack trace for now
2023-01-28 17:39:32 -05:00
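A minimal sketch of the version-gated workaround this commit describes (the installer's real code may differ; `ignore_cleanup_errors` is the Python 3.10 flag in question):

```
import sys
import tempfile

# Python 3.10 added ignore_cleanup_errors; earlier interpreters on Windows
# may still surface the PermissionError raised during temp-dir cleanup.
if sys.version_info >= (3, 10):
    tmp = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
else:
    tmp = tempfile.TemporaryDirectory()

with tmp as tmpdir:
    print("working in", tmpdir)  # e.g. download/unpack into tmpdir here
```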
Eugene Brodsky
169c56e471 (installer) install PyTorch from correct repositories 2023-01-28 17:39:32 -05:00
Eugene Brodsky
b186965e77 (installer) ask the user for their GPU type; improve other messaging 2023-01-28 17:39:32 -05:00
Eugene Brodsky
88526b9294 (config) move configure_invokeai script to the config module for easier importing 2023-01-28 17:39:32 -05:00
Eugene Brodsky
071a438745 (installer) add graphics accelerator selection 2023-01-28 17:39:32 -05:00
Eugene Brodsky
93129fde32 (installer) run configure_invokeai from within the installer 2023-01-28 17:39:32 -05:00
Eugene Brodsky
802b95b9d9 (installer) use prompt-toolkit for directory picking instead of tkinter 2023-01-28 17:39:32 -05:00
Eugene Brodsky
c279314cf5 (installer) use plumbum for better stdout streaming 2023-01-28 17:39:32 -05:00
Eugene Brodsky
f75b194b76 (installer) PoC to install the app (source installer style) into the app venv 2023-01-28 17:39:32 -05:00
Eugene Brodsky
bf1996bbcf (installer) add venv creation for the app 2023-01-28 17:39:32 -05:00
Eugene Brodsky
d3962ab7b5 (installer) Windows fixes 2023-01-28 17:39:32 -05:00
Eugene Brodsky
2296f5449e (installer) initial work on the installer 2023-01-28 17:39:32 -05:00
Kevin Turner
b6d37a70ca fix(inpainting model): threshold mask to avoid gray blurry seam 2023-01-28 13:34:22 -08:00
Kevin Turner
71b6ddf5fb fix(inpainting model): blank areas to be repainted in the masked image
Otherwise the model seems too reluctant to change these areas, even though the mask channel should allow it to.
2023-01-28 11:10:32 -08:00
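The idea in this commit, as a hedged sketch (the array layout, the 0.5 threshold, and the helper name are assumptions for illustration):

```
import numpy as np

def blank_masked_region(image: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Zero out the pixels the inpainting model is asked to repaint.

    image: HxWx3 float array; mask: HxW array, >0.5 where repainting occurs.
    Blanking these pixels keeps the model from anchoring to the old content.
    """
    out = image.copy()
    out[mask > 0.5] = 0.0
    return out
```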
mauwii
14de7ed925 remove requirements step from install manual 2023-01-28 00:58:32 +01:00
Kevin Turner
6556b200b5 remove experimental "blur" infill
It seems counterproductive for use with the inpainting model, and not especially useful otherwise.
2023-01-27 15:25:50 -08:00
Kevin Turner
d627cd1865 feat(inpaint): add simpler infill methods for use with inpainting model 2023-01-27 14:28:16 -08:00
Kevin Turner
09b6104bfd refactor(txt2img2img): factor out tensor shape 2023-01-27 12:04:12 -08:00
Kevin Turner
1bb5b4ab32 fix dimension errors when inpainting model is used with hires-fix 2023-01-27 11:52:05 -08:00
Lincoln Stein
c18db4e47b removed defunct textual inversion script (#2433)
The original textual inversion script in scripts is now superseded. The
replacement can be found in ldm/invoke/textual_inversion.py and is a
merging of the command line and front end scripts. After running `pip
install -e .` there will be a `textual_inversion` command on your path.
You can activate the front end this way:

`textual_inversion --gui`
2023-01-27 10:44:34 -05:00
Jonathan
f9c92e3576 Merge branch 'main' into bugfix/remove-defunct-scripts 2023-01-27 08:32:15 -06:00
psychedelicious
1ceb7a60db adds double-click to reset view to 100% (#2436)
Adds double-click to reset canvas view to 100%.

- Adds hook to manage single and double clicks
- Single Click `Reset Canvas View` --> scale to fit, no change to
current behaviour
- Double Click `Reset Canvas View` --> set scale to 1
2023-01-28 00:56:24 +11:00
psychedelicious
f509650ec5 adds double-click to reset view to 100% 2023-01-27 08:30:24 -05:00
psychedelicious
0d0f35a1e2 Fix download button styling (#2435)
fixes #2383
2023-01-28 00:29:34 +11:00
psychedelicious
6dbc42fc1a fixes download button styling 2023-01-27 20:23:12 +11:00
Lincoln Stein
f6018fe5aa removed defunct textual inversion script 2023-01-26 23:35:09 -05:00
blessedcoolant
e4cd66216e Frontend Build (diffusers-mm-fixes) 2023-01-27 17:23:25 +13:00
blessedcoolant
995fbc78c8 Diffusers Model Manager Fixes 2023-01-27 17:23:25 +13:00
blessedcoolant
3083f8313d Default Seam Steps to 30
This seems to be a temporary fix for seams looking horrible with some diffusers models.
2023-01-27 17:23:25 +13:00
Lincoln Stein
c0614ac7f3 Improve configuration of initial Waifu models (#2426)
Testing suggests that the diffusers versions of Waifu-1.4 and anything-v4.0
require the `sd-vae-ft-mse` VAE to generate decent images, so the
appropriate arguments have been added to the initial model file.
2023-01-26 18:18:00 -05:00
Lincoln Stein
0186630514 Merge branch 'main' into install/better-initial-models 2023-01-26 17:42:10 -05:00
Lincoln Stein
d53df09203 [enhancement] Improve organization & behavior of model merging and textual inversion scripts (#2427)
- Model merging and textual inversion scripts have been moved into
`ldm/invoke`, which allows them to be installed properly by
pyproject.toml.
- As part of the pyproject install, the .py suffix is removed from the
command. I.e. use `invoke`, `configure_invokeai`, `merge_models` and
`textual_inversion`.
- GUI versions are activated by adding `--gui` to the command. Without
this, you get a classical argv-based command. Example: `merge_models
--gui`
- Fixed up the launcher scripts to accommodate new naming scheme.
- Keyboard behavior of the GUI front ends has been improved. You can now
use up and down arrow to move from field to field, in addition to <tab>
and ctrl-N/ctrl-P
2023-01-26 17:36:45 -05:00
Lincoln Stein
12a29bfbc0 Merge branch 'main' into install/change-script-locations 2023-01-26 17:10:33 -05:00
Lincoln Stein
f36114eb94 Fix Sliders unable to take typed input (#2407)
So far the slider component was unable to take typed input due to a
bunch of issues that were a pain to solve. This PR fixes it.

Things to test:

- Moving the slider also updates the value in the input text box.
- Input text box next to slider can be changed in two ways: If you type
a manual value, the slider will be updated when you lose focus from the
input box. If you use the stepper icons to update the values, the slider
should update immediately.
- Make sure the reset buttons next to the slider are updating correctly
and make sure this updates both the slider and the input box values.
- Brush Size slider -> make sure the hotkeys are updating the input box
too.
2023-01-26 17:10:16 -05:00
Lincoln Stein
c255481c11 Merge branch 'main' into slider-fix 2023-01-26 16:20:25 -05:00
Lincoln Stein
7f81105acf dev: update to diffusers 0.12, transformers 4.26 (#2420)
Happy New Year!
2023-01-26 16:18:37 -05:00
Lincoln Stein
c8de679dc3 Merge branch 'main' into update-diffusers 2023-01-26 15:43:41 -05:00
Lincoln Stein
85b18fe9ee Merge branch 'main' into install/better-initial-models 2023-01-26 15:42:13 -05:00
Lincoln Stein
e0d8c19da6 fix indentation problem 2023-01-26 15:39:59 -05:00
Lincoln Stein
5567808237 tweak documentation 2023-01-26 15:28:54 -05:00
Lincoln Stein
2817f8a428 update launcher shell scripts for new script names & paths 2023-01-26 15:26:38 -05:00
Lincoln Stein
8e4c044ca2 clean up tab/cursor behavior in textual inversion txt gui 2023-01-26 15:18:28 -05:00
Lincoln Stein
9dc3832b9b clean up merge_models 2023-01-26 15:10:16 -05:00
Lincoln Stein
046abb634e Remove dependency on original clipseg library for text masking (#2425)
- This replaces the original clipseg library with the transformers
version from HuggingFace.
- This should make it possible to register InvokeAI at PyPi and do a
fully automated pip-based install.
- Minor regression: it is no longer possible to specify which device the
clipseg model will be loaded into, and it will reside in CPU. However,
performance is more than acceptable.
2023-01-26 12:14:13 -05:00
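A sketch of text-prompted masking with the transformers CLIPSeg port (the checkpoint name is the commonly published one, not necessarily the exact one InvokeAI loads; as the commit notes, it runs on CPU):

```
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("banana_sushi.png").convert("RGB")
inputs = processor(text=["banana"], images=[image], return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # low-resolution heatmap per prompt
mask = torch.sigmoid(logits)  # threshold/upscale this to build the text mask
```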
Lincoln Stein
d3a469d136 fix location of textual_inversion script 2023-01-26 11:56:23 -05:00
Lincoln Stein
e79f89b619 improve initial model configuration 2023-01-26 11:53:06 -05:00
Lincoln Stein
cbd967cbc4 add documentation caveat about location of HF cached models 2023-01-26 11:48:03 -05:00
damian
e090c0dc10 try without setting every time 2023-01-26 17:46:51 +01:00
damian
c381788ab9 don't restore None 2023-01-26 17:44:27 +01:00
damian
fb312f9ed3 use the correct value - whoops 2023-01-26 17:30:29 +01:00
damian
729752620b trying out JPPhoto's patch on vast.ai 2023-01-26 17:27:33 +01:00
damian
8ed8bf52d0 use 'auto' slice size 2023-01-26 17:04:22 +01:00
Lincoln Stein
a49d546125 simplified code a bit 2023-01-26 09:46:34 -05:00
Lincoln Stein
288e31fc60 remove dependency on original clipseg library
- This replaces the original clipseg library with the transformers
  version from HuggingFace.
- This should make it possible to register InvokeAI at PyPi and do
  a fully automated pip-based install.
- Minor regression: it is no longer possible to specify which device
  the clipseg model will be loaded into, and it will reside in CPU.
  However, performance is more than acceptable.
2023-01-26 09:35:16 -05:00
Lincoln Stein
7b2c0d12a3 add missing VAEs to initial diffuser models 2023-01-26 00:25:39 -05:00
Kevin Turner
2978c3eb8d Merge branch 'main' into update-diffusers 2023-01-25 18:42:00 -08:00
Damian Stewart
5e7ed964d2 wip updating docs 2023-01-25 23:49:38 +01:00
Damian Stewart
93a24445dc Merge remote-tracking branch 'upstream/main' into diffusers_cross_attention_control_reimplementation 2023-01-25 23:05:39 +01:00
Damian Stewart
95d147c5df MPS support: negatory 2023-01-25 23:03:30 +01:00
Damian Stewart
41aed57449 wip tracking down MPS slicing support 2023-01-25 22:27:23 +01:00
Damian Stewart
34a3f4a820 cleanup 2023-01-25 21:47:17 +01:00
Damian Stewart
1f5ad1b05e sliced swap working 2023-01-25 21:38:27 +01:00
blessedcoolant
87c63f1f08 Slider Fix Build 2023-01-26 09:04:52 +13:00
blessedcoolant
5b054dd5b7 Conflict Resolved Build (slider-fix) 2023-01-26 09:04:20 +13:00
blessedcoolant
fc5c8cc800 Merge branch 'main' into slider-fix 2023-01-26 09:03:02 +13:00
blessedcoolant
eb2ca4970b Add Dutch Localization Build 2023-01-26 08:56:38 +13:00
blessedcoolant
c2b10e6461 Add Dutch Localization 2023-01-26 08:56:38 +13:00
Dennis
190d266060 Dutch localization 2023-01-26 08:56:38 +13:00
Kevin Turner
8c8e1a448d dev: update to diffusers 0.12, transformers 4.26
Happy New Year!
2023-01-25 10:51:56 -08:00
Damian Stewart
c52dd7e3f4 Merge branch 'diffusers_cross_attention_control_reimplementation' of github.com:damian0815/InvokeAI into diffusers_cross_attention_control_reimplementation 2023-01-25 14:51:15 +01:00
Damian Stewart
a4aea1540b more wip sliced attention (.swap doesn't work yet) 2023-01-25 14:51:08 +01:00
Kevin Turner
3c53b46a35 Merge branch 'main' into diffusers_cross_attention_control_reimplementation 2023-01-24 19:32:34 -08:00
blessedcoolant
65fd6cd105 Merge branch 'main' into slider-fix 2023-01-25 08:28:37 +13:00
Lincoln Stein
61403fe306 fix second conflict in CLI.py 2023-01-24 14:21:21 -05:00
Lincoln Stein
b2f288d6ec fix conflict in CLI.py 2023-01-24 14:20:40 -05:00
blessedcoolant
d1d12e4f92 Merge branch 'main' into slider-fix 2023-01-25 08:06:30 +13:00
Lincoln Stein
eaf7934d74 [Enhancements] Allow user to specify VAE with !import_model and delete underlying model with !del_model (#2369)
Fix two deficiencies in the CLI's support for model management:

1. `!import_model` did not allow user to specify VAE file. This is now
fixed.
2. `!del_model` did not offer the user the opportunity to delete the
underlying
       weights file or diffusers directory. This is now fixed.
2023-01-24 13:43:16 -05:00
Lincoln Stein
079ec4cb5c Merge branch 'main' into feat/import-with-vae 2023-01-24 13:16:00 -05:00
blessedcoolant
38d0b1e3df Merge branch 'main' into slider-fix 2023-01-25 07:14:26 +13:00
blessedcoolant
fc6500e819 Fix Inpaint Replace Slider 2023-01-25 07:13:01 +13:00
Lincoln Stein
f521f5feba improve UI of textual inversion frontend (#2333)
- File selection box now accepts directories that don't exist yet.
- Fixed crash when resume is selected and no files are available to resume from.
2023-01-24 12:22:17 -05:00
Lincoln Stein
ce865a8d69 Merge branch 'main' into slider-fix 2023-01-24 12:21:39 -05:00
Lincoln Stein
00839d02ab Merge branch 'main' into lstein-improve-ti-frontend 2023-01-24 11:53:03 -05:00
Lincoln Stein
ce52d0c42b Merge branch 'main' into feat/import-with-vae 2023-01-24 11:52:40 -05:00
Lincoln Stein
f687d90bca [feat] Better status reporting when loading embeds and concepts (#2372)
This PR improves the console reporting of the process of recognizing
trigger tokens and loading their embeds.

1. Do not report "concept is not known to HuggingFace" if the trigger
term is in fact a local embedding trigger.
2. When a trigger term is first recognized during a session, report the
fact.
This should help debug embedding issues in the future.

Note that the local embeddings produced by the new InvokeAI TI training
script default to the format <trigger> with literal angle brackets. This
sets them off from the rest of the text well and will enable
autocomplete at some point in the future. However, this means that they
supersede like-named HuggingFace concepts, and may cause problems for
people uploading them to the HuggingFace repository (although that
problem already exists).
2023-01-24 09:35:53 -05:00
Lincoln Stein
7473d814f5 remove original setup.py 2023-01-24 09:11:05 -05:00
Lincoln Stein
b2c30c2093 Merge branch 'main' into bugfix/embed-loading-messages 2023-01-24 09:08:13 -05:00
Lincoln Stein
a7048eea5f Merge branch 'main' into feat/import-with-vae 2023-01-24 09:07:41 -05:00
Lincoln Stein
87c9398266 [enhancement] import .safetensors ckpt files directly (#2353)
This small fix makes it possible to import and run safetensors ckpt
files directly without doing a conversion step first.
2023-01-24 09:06:49 -05:00
Damian Stewart
63c6019f92 sliced attention processor wip (untested) 2023-01-24 14:46:32 +01:00
blessedcoolant
8eaf0d8bfe Fix Slider Build 2023-01-24 16:44:58 +13:00
blessedcoolant
5344481809 Fix Slider not being able to take typed input 2023-01-24 16:43:29 +13:00
Lincoln Stein
9f32daab2d Merge branch 'main' into lstein-import-safetensors 2023-01-23 21:58:07 -05:00
Lincoln Stein
884768c39d Make sure --free_gpu_mem still works when using CKPT-based diffuser model (#2367)
This PR attempts to fix the `--free_gpu_mem` option that was not working
with CKPT-based diffusers models after #1583.

I noticed that memory usage after #1583 did not decrease after
generating an image when the `--free_gpu_mem` option was enabled.
It turns out that the option was not propagated into the `Generator`
instance, hence generation would always run without the memory-saving
procedure.

This PR is also related to #2326. Initially, I was trying to make
`--free_gpu_mem` work on 🤗 diffusers models as well.
In the process, I noticed that InvokeAI would raise an exception when
`--free_gpu_mem` was enabled.
I tried to quickly fix it by simply ignoring the exception and producing a
warning message in the user's console.
2023-01-23 21:48:23 -05:00
Lincoln Stein
bc2194228e stability improvements
- provide full traceback when a model fails to load
- fix VAE record for VoxelArt; otherwise load fails
2023-01-23 21:40:27 -05:00
Lincoln Stein
10c3afef17 Merge branch 'main' into bugfix/free-gpu-mem-diffuser 2023-01-23 21:15:12 -05:00
Lincoln Stein
98e9721101 correct fail-to-resume error
- applied https://github.com/huggingface/diffusers/pull/2072 to fix
  error in epoch calculation that caused script not to resume from
  latest checkpoint when asked to.
2023-01-23 21:04:07 -05:00
blessedcoolant
66babb2e81 Japanese Localization Build 2023-01-24 09:07:29 +13:00
blessedcoolant
31a967965b Add Japanese Localization 2023-01-24 09:07:29 +13:00
Katsuyuki-Karasawa
b9c9b947cd update japanese translation 2023-01-24 09:07:29 +13:00
唐澤 克幸
1eee08a070 add Japanese Translation 2023-01-24 09:07:29 +13:00
Lincoln Stein
aca1b61413 [Feature] Add interactive diffusers model merger (#2388)
This PR adds `scripts/merge_fe.py`, which will merge any 2-3 diffusers
models registered in InvokeAI's `models.yaml`, producing a new merged
model that will be registered as well.

Currently this script will only work if all models to be merged are
known by their repo_ids. Local models, including those converted from
ckpt files, will cause a crash due to a bug in the diffusers
`checkpoint_merger.py` code. I have made a PR against
huggingface/diffusers which fixes this:
https://github.com/huggingface/diffusers/pull/2060
2023-01-23 09:27:05 -05:00
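The merge itself goes through the diffusers `checkpoint_merger` community pipeline; roughly like this sketch (repo ids and interpolation settings are illustrative):

```
from diffusers import DiffusionPipeline

# Load any diffusers model with the community "checkpoint_merger" pipeline...
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger"
)
# ...then merge two (or three) models known by repo_id, as the commit notes.
merged = pipe.merge(
    ["CompVis/stable-diffusion-v1-4", "runwayml/stable-diffusion-v1-5"],
    interp="sigmoid",
    alpha=0.4,
)
```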
Lincoln Stein
e18beaff9c Merge branch 'main' into feat/merge-script 2023-01-23 09:05:38 -05:00
Kevin Turner
d7554b01fd fix typo in prompt 2023-01-23 00:24:06 -08:00
Kevin Turner
70f8793700 Merge branch 'main' into feat/import-with-vae 2023-01-23 00:17:46 -08:00
Kevin Turner
0d4e6cbff5 Merge branch 'main' into bugfix/embed-loading-messages 2023-01-23 00:12:33 -08:00
Kevin Turner
ea61bf2c94 [bugfix] ckpt conversion script respects cache in ~/invokeai/models (#2395) 2023-01-23 00:07:23 -08:00
Lincoln Stein
7dead7696c fixed setup.py to install the new scripts 2023-01-23 00:43:15 -05:00
Lincoln Stein
ffcc5ad795 conversion script uses invokeai models cache by default 2023-01-23 00:35:16 -05:00
Lincoln Stein
48deb3e49d add model merging documentation and launcher script menu entries 2023-01-23 00:20:28 -05:00
Lincoln Stein
6c31225d19 create small module for merge importation logic 2023-01-22 18:07:53 -05:00
Damian Stewart
c0610f7cb9 pass missing value 2023-01-22 18:19:06 +01:00
Damian Stewart
313b206ff8 squash float16/float32 mismatch on linux 2023-01-22 18:13:12 +01:00
Lincoln Stein
f0fe483915 Merge branch 'main' into feat/merge-script 2023-01-21 18:42:40 -05:00
Lincoln Stein
4ee8d104f0 working, but needs diffusers PR to be accepted 2023-01-21 18:39:13 -05:00
Kevin Turner
89791d91e8 fix: use pad_token for padding (#2381)
Stable Diffusion 2 does not use eos_token for padding.

Fixes #2378
2023-01-21 13:30:03 -08:00
Kevin Turner
87f3da92e9 Merge branch 'main' into fix/sd2-padding-token 2023-01-21 13:11:02 -08:00
Lincoln Stein
f169bb0020 fix long prompt weighting bug in ckpt codepath (#2382) 2023-01-21 15:14:14 -05:00
Damian Stewart
155efadec2 Merge branch 'main' into fix/sd2-padding-token 2023-01-21 21:05:40 +01:00
Damian Stewart
bffe199ad7 SwapCrossAttnProcessor working - tested on mac CPU (MPS doesn't work) 2023-01-21 20:54:18 +01:00
Damian Stewart
0c2a511671 wip SwapCrossAttnProcessor 2023-01-21 18:07:36 +01:00
Damian Stewart
e94c8fa285 fix long prompt weighting bug in ckpt codepath 2023-01-21 12:08:21 +01:00
Lincoln Stein
b3363a934d Update index.md (#2377) 2023-01-21 00:17:23 -05:00
Lincoln Stein
599c558c87 Merge branch 'main' into patch-1 2023-01-20 23:54:40 -05:00
Kevin Turner
d35ec3398d fix: use pad_token for padding
Stable Diffusion does not use the eos_token for padding.
2023-01-20 19:25:20 -08:00
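An illustration of the fix, assuming the stock diffusers SD2 tokenizer (for SD1 `pad_token_id` and `eos_token_id` coincide, which is why the bug only bit SD2):

```
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-2-1", subfolder="tokenizer"
)
ids = tokenizer("banana sushi").input_ids
# Pad with pad_token_id, NOT eos_token_id: SD2's tokenizer defines them
# differently, and padding with <eos> distorts the conditioning.
padded = ids + [tokenizer.pad_token_id] * (tokenizer.model_max_length - len(ids))
```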
Lincoln Stein
96a900d1fe correctly import diffusers models by their local path
- Corrects a bug in which the local path was treated as a repo_id
2023-01-20 20:13:43 -05:00
Lincoln Stein
f00f7095f9 Add instructions for installing xFormers on linux (#2360)
I've written up the install procedure for xFormers on Linux systems.

I need help with the Windows install; I don't know what the build
dependencies (compiler, etc) are. This section of the docs is currently
empty.

Please see `docs/installation/070_INSTALL_XFORMERS.md`
2023-01-20 17:57:12 -05:00
mauwii
d7217e3801 disable unstable CI tests for windows runners
and therefore enable all pytorch versions to verify installation
2023-01-20 23:30:25 +01:00
mauwii
fc5fdae562 update installation instructions 2023-01-20 23:30:25 +01:00
mauwii
a491644e56 fix dependencies/requirements 2023-01-20 23:30:24 +01:00
mauwii
ec2a509e01 make images in README.md compatible with PyPI
also add missing new-lines before/after headings
2023-01-20 23:30:24 +01:00
mauwii
6a3a0af676 update test-invoke-pip.yml
- remove stable-diffusion-model from matrix
- add windows-cuda-11_6 and linux-cuda-11_6
- enable linux-cpu
- disable windows-cpu
- change step order
- remove job env
- set runner.os specific env
- install editable
- cache models folder
- remove `--model` and `--root` arguments from invoke command
2023-01-20 23:30:24 +01:00
mauwii
ef4b03289a enable image generating step for windows as well
- also remove left-over debug lines and a development-branch leftover
2023-01-20 23:30:24 +01:00
mauwii
963b666844 fix memory issue on windows runner
- use cpu version which is only 162.6 MB
- set `INVOKEAI_ROOT=C:\InvokeAI` on Windows runners
2023-01-20 23:30:24 +01:00
mauwii
5a788f8f73 fix test-invoke-pip.yml matrix 2023-01-20 23:30:24 +01:00
mauwii
5afb63e41b replace legacy setup.py with pyproject.toml
other changes which were required:
- move configure_invokeai.py into ldm.invoke
- update files which imported configure_invokeai to use new location:
    - ldm/invoke/CLI.py
    - scripts/load_models.py
    - scripts/preload_models.py
- update test-invoke-pip.yml:
    - remove pr type "converted_to_draft"
    - remove reference to dev/diffusers
    - remove no more needed requirements from matrix
    - add pytorch to matrix
    - install via `pip3 install --use-pep517 .`
    - use the created executables
        - this should also fix configure_invoke not being executed on Windows
To install use `pip install --use-pep517 -e .` where `-e` is optional
2023-01-20 23:30:24 +01:00
Lincoln Stein
279ffcfe15 Merge branch 'main' into lstein/xformers-instructions 2023-01-20 17:29:39 -05:00
Lincoln Stein
9b73292fcb add pip install documentation for xformers 2023-01-20 17:28:14 -05:00
Lincoln Stein
67d91dc550 Merge branch 'bugfix/embed-loading-messages' of github.com:invoke-ai/InvokeAI into bugfix/embed-loading-messages 2023-01-20 17:16:50 -05:00
Lincoln Stein
a1c0818a08 ignore .DS_Store files when scanning Mac embeddings 2023-01-20 17:16:39 -05:00
Lincoln Stein
2cf825b169 Merge branch 'main' into bugfix/embed-loading-messages 2023-01-20 17:14:46 -05:00
Lincoln Stein
292b0d70d8 Merge branch 'lstein-improve-ti-frontend' of github.com:invoke-ai/InvokeAI into lstein-improve-ti-frontend 2023-01-20 17:14:08 -05:00
Lincoln Stein
c3aa3d48a0 ignore .DS_Store files when scanning Mac embeddings 2023-01-20 17:13:32 -05:00
Lincoln Stein
9e3c947cd3 Merge branch 'main' into lstein-improve-ti-frontend 2023-01-20 17:01:09 -05:00
Lincoln Stein
4b8aebabfb add diffusers repo as a reference for further reading 2023-01-20 16:59:34 -05:00
Lincoln Stein
080fc4b380 add documentation and minor bug fixes
- Added new documentation for textual inversion training process
- Move `main.py` into the deprecated scripts folder
- Fix bug in `textual_inversion.py` which was causing it to not load
  the globals module correctly.
- Sort models alphabetically in console front end
- Only show diffusers models in console front end
2023-01-20 16:55:50 -05:00
Lincoln Stein
195294e74f sort models alphabetically 2023-01-20 15:17:54 -05:00
michaelk71
da81165a4b Update index.md 2023-01-20 19:03:12 +01:00
Lincoln Stein
f3ff386491 [enhancement] Reorganize form for textual inversion training (#2375)
- Add num_train_epochs
- Reorganize widgets so all sliders that control # of steps are together
2023-01-20 10:58:26 -05:00
Lincoln Stein
da524f159e Merge branch 'main' into feat/enhance-ti-training-ui 2023-01-20 10:28:27 -05:00
Lincoln Stein
2d1eeec063 Save HFToken only if it is present (#2370)
Fixes https://github.com/invoke-ai/InvokeAI/issues/2083
2023-01-19 22:16:19 -05:00
Nicholas Koh
a8bb1a1109 Save HFToken only if it is present 2023-01-19 21:47:27 -05:00
Lincoln Stein
d9fa505412 [feat] Provide option to disable xformers from command line (#2373)
Starting `invoke.py` with --no-xformers will disable
memory-efficient-attention support if xformers is installed.

For symmetry, `--xformers` will enable support, but this is already the
default if xformers is available.
2023-01-19 19:15:57 -05:00
Lincoln Stein
02ce602a38 Merge branch 'main' into feat/disable-xformers 2023-01-19 18:45:59 -05:00
Lincoln Stein
9b1843307b [enhancement] Reorganize form for textual inversion training
- Add num_train_epochs
- Reorganize widgets so all sliders that control # of steps are together
2023-01-19 18:43:12 -05:00
Lincoln Stein
f0010919f2 Merge branch 'main' into bugfix/free-gpu-mem-diffuser 2023-01-19 18:03:36 -05:00
Lincoln Stein
d113b4ad41 [bugfix] suppress extraneous warning messages generated by diffusers (#2374)
This commit suppresses a few irrelevant warning messages that the
diffusers module produces:

1. The warning that turning off the NSFW detector makes you an
irresponsible person.
2. Warnings about running fp16 models stored in CPU (we are not running
them in CPU, just caching them in CPU RAM)
2023-01-19 18:00:31 -05:00
Lincoln Stein
895505976e [bugfix] suppress extraneous warning messages generated by diffusers
This commit suppresses a few irrelevant warning messages that the
diffusers module produces:

1. The warning that turning off the NSFW detector makes you an
irresponsible person.
2. Warnings about running fp16 models stored in CPU (we are not running
   them in CPU, just caching them in CPU RAM)
2023-01-19 16:49:40 -05:00
Lincoln Stein
171f4aa71b [feat] Provide option to disable xformers from command line
Starting `invoke.py` with --no-xformers will disable
memory-efficient-attention support if xformers is installed.

--xformers will enable support, but this is already the
default.
2023-01-19 16:16:35 -05:00
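Wiring for the flag might look like this sketch (the argument names come from the commit; the pipeline call is the standard diffusers toggle, and the parser wiring is an assumption):

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--xformers", dest="xformers", action="store_true", default=True)
parser.add_argument("--no-xformers", dest="xformers", action="store_false")
opt = parser.parse_args()

def maybe_enable_xformers(pipe, enabled: bool) -> None:
    if not enabled:
        return
    try:
        # standard diffusers toggle for memory-efficient attention
        pipe.enable_xformers_memory_efficient_attention()
    except Exception:
        pass  # xformers not installed; keep the default attention
```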
Lincoln Stein
775e1a21c7 improve embed trigger token not found error
- Now indicates that the trigger is *neither* a huggingface concept,
  nor the trigger of a locally loaded embed.
2023-01-19 15:46:58 -05:00
Lincoln Stein
3c3d893b9d improve status reporting when loading local and remote embeddings
- During trigger token processing, emit better status messages indicating
  which triggers were found.
- Suppress the message "<token> is not known to HuggingFace library" when
  the token is in fact a local embed.
2023-01-19 15:43:52 -05:00
Lincoln Stein
33a5c83c74 during ckpt->diffusers tell user when custom autoencoder can't be loaded
- When a ckpt or safetensors file uses an external autoencoder and we
  don't know which diffusers model corresponds to it (if any!), then
  we fall back to using stabilityai/sd-vae-ft-mse
- This commit improves error reporting so that the user knows what is happening.
2023-01-19 12:05:49 -05:00
Lincoln Stein
7ee0edcb9e when converting a ckpt/safetensors model, preserve vae in diffusers config
- After successfully converting a ckpt file to diffusers, model_manager
  will attempt to create an equivalent 'vae' entry in the resulting
  diffusers stanza.

- This is a bit of a hack, as it relies on a hard-coded dictionary
  to map ckpt VAEs to diffusers VAEs. The correct way to do this
  would be to convert the VAE to a diffusers model and then point
  to that. But since (almost) all models are using vae-ft-mse-840000-ema-pruned,
  I did it the easy way first and will work on the better solution later.
2023-01-19 11:02:49 -05:00
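The hard-coded mapping could be as small as this sketch (the dictionary contents and helper name are illustrative; only the sd-vae-ft-mse fallback is stated in the commit):

```
from diffusers import AutoencoderKL

# Known ckpt-era VAE weights -> equivalent diffusers VAE repos (illustrative).
CKPT_VAE_TO_DIFFUSERS = {
    "vae-ft-mse-840000-ema-pruned.ckpt": "stabilityai/sd-vae-ft-mse",
}

def diffusers_vae_for(ckpt_vae_name: str) -> AutoencoderKL:
    repo = CKPT_VAE_TO_DIFFUSERS.get(ckpt_vae_name, "stabilityai/sd-vae-ft-mse")
    return AutoencoderKL.from_pretrained(repo)
```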
Lincoln Stein
7bd2220a24 fix two bugs in model import
1. !import_model did not allow user to specify VAE file. This is now fixed.
2. !del_model did not offer the user the opportunity to delete the underlying
   weights file or diffusers directory. This is now fixed.
2023-01-19 01:30:58 -05:00
Lincoln Stein
284b432ffd add triton install instructions 2023-01-18 22:34:36 -05:00
Lincoln Stein
ab675af264 Merge branch 'main' into lstein-improve-ti-frontend 2023-01-18 22:22:30 -05:00
Daya Adianto
be58a6bfbc Merge branch 'main' into bugfix/free-gpu-mem-diffuser 2023-01-19 10:21:06 +07:00
Daya Adianto
5a40aadbee Ensure free_gpu_mem option is passed into the generator (#2326) 2023-01-19 09:57:03 +07:00
Lincoln Stein
e11f15cf78 Merge branch 'main' into lstein-import-safetensors 2023-01-18 17:09:48 -05:00
Lincoln Stein
ce17051b28 Store & load 🤗 models at XDG_CACHE_HOME if HF_HOME is not set (#2359)
This commit allows InvokeAI to store & load 🤗 models at a location set
by `XDG_CACHE_HOME` environment variable if `HF_HOME` is not set.

By integrating this commit, a user who either use `HF_HOME` or
`XDG_CACHE_HOME` environment variables in their environment can let
InvokeAI to reuse the existing cache directory used by 🤗 library by
default. I happened to benefit from this commit because I have a Jupyter
Notebook that uses 🤗 diffusers model stored at `XDG_CACHE_HOME`
directory.

Reference:
https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
2023-01-18 17:05:06 -05:00
Lincoln Stein
a2bdc8b579 Merge branch 'lstein-import-safetensors' of github.com:invoke-ai/InvokeAI into lstein-import-safetensors 2023-01-18 12:16:06 -05:00
Lincoln Stein
1c62ae461e fix vae safetensor loading 2023-01-18 12:15:57 -05:00
Lincoln Stein
c5b802b596 Merge branch 'main' into feature/hub-in-xdg-cache-home 2023-01-18 11:53:46 -05:00
Lincoln Stein
b9ab9ffb4a Merge branch 'main' into lstein-import-safetensors 2023-01-18 10:58:38 -05:00
Lincoln Stein
f232068ab8 Update automated install doc - link to MS C libs (#2306)
Updated the link for the MS Visual C libraries - I'm not sure if MS
changed the location of the files but this new one leads right to the
file downloads.
2023-01-18 10:56:09 -05:00
Lincoln Stein
4556f29359 Merge branch 'main' into lstein/xformers-instructions 2023-01-18 09:33:17 -05:00
Lincoln Stein
c1521be445 add instructions for installing xFormers on linux 2023-01-18 09:31:19 -05:00
Daya Adianto
f3e952ecf0 Use global_cache_dir calls properly 2023-01-18 21:06:01 +07:00
Daya Adianto
aa4e8d8cf3 Migrate legacy models (pre-2.3.0) to the 🤗 cache directory if it exists 2023-01-18 21:02:31 +07:00
Daya Adianto
a7b2074106 Ignore free_gpu_mem when using 🤗 diffuser model (#2326) 2023-01-18 19:42:11 +07:00
Daya Adianto
2282e681f7 Store & load 🤗 models at XDG_CACHE_HOME if HF_HOME is not set
This commit allows InvokeAI to store & load 🤗 models at a location
set by `XDG_CACHE_HOME` environment variable if `HF_HOME` is not set.

Reference: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
2023-01-18 19:32:09 +07:00
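The lookup order described above, as a short sketch (the function name is hypothetical):

```
import os
from pathlib import Path

def hf_cache_root() -> Path:
    """Honor HF_HOME first; otherwise fall back to XDG_CACHE_HOME/huggingface,
    then to the stock ~/.cache/huggingface default."""
    if os.environ.get("HF_HOME"):
        return Path(os.environ["HF_HOME"])
    if os.environ.get("XDG_CACHE_HOME"):
        return Path(os.environ["XDG_CACHE_HOME"]) / "huggingface"
    return Path.home() / ".cache" / "huggingface"
```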
Lincoln Stein
6e2365f835 Merge branch 'main' into patch-1 2023-01-17 23:52:13 -05:00
Lincoln Stein
e4ea98c277 further improvements to initial load (#2330)
- Migration process will not crash if duplicate model files are found,
one in legacy location and the other in new location. The model in the
legacy location will be deleted in this case.

- Added a hint to stable-diffusion-2.1 telling people it will work best
with 768 pixel images.

- Added the anything-4.0 model.
2023-01-17 23:21:14 -05:00
Lincoln Stein
2fd5fe6c89 Merge branch 'main' into lstein-improve-migration 2023-01-17 22:55:58 -05:00
Lincoln Stein
4a9e93463d Merge branch 'lstein-import-safetensors' of github.com:invoke-ai/InvokeAI into lstein-import-safetensors 2023-01-17 22:52:50 -05:00
Lincoln Stein
0b5c0c374e load safetensors vaes 2023-01-17 22:51:57 -05:00
Lincoln Stein
5750f5dac2 Merge branch 'main' into lstein-import-safetensors 2023-01-17 21:31:56 -05:00
Kevin Turner
3fb095de88 do not use autocast for diffusers (#2349)
fixes #2345
2023-01-17 14:26:35 -08:00
Lincoln Stein
c5fecfe281 Merge branch 'main' into lstein-improve-migration 2023-01-17 17:05:12 -05:00
Kevin Turner
1fa6a3558e Merge branch 'main' into lstein-fix-autocast 2023-01-17 14:00:51 -08:00
Lincoln Stein
2ee68cecd9 tip fix (#2281)
Context: Small fix for the manual, added tab for a "!!! tip"
2023-01-17 16:25:09 -05:00
Lincoln Stein
c8d1d4d159 Merge branch 'main' into lstein-fix-autocast 2023-01-17 16:23:33 -05:00
Lincoln Stein
529b19f8f6 Merge branch 'main' into patch-1 2023-01-17 14:57:17 -05:00
Lincoln Stein
be4f44fafd [Enhancement] add --default_only arg to configure_invokeai.py, for CI use (#2355)
Added a --default_only argument that limits model downloads to the
single default model, for use in continuous integration.

New behavior

         - switch -
    --yes      --default_only           Behavior
    -----      --------------           --------

   <not set>     <not set>              interactive download

   --yes         <not set>              non-interactively download all
                                          recommended models

   --yes        --default_only          non-interactively download the
                                          default model
2023-01-17 14:56:50 -05:00
Kevin Turner
5aec48735e lint(generator): 🚮 remove unused imports 2023-01-17 11:44:45 -08:00
Kevin Turner
3c919f0337 Restore ldm/invoke/conditioning.py 2023-01-17 11:37:14 -08:00
mauwii
858ddffab6 add --default_only to run-preload-models step 2023-01-17 20:10:37 +01:00
Lincoln Stein
212fec669a add --default_only arg to configure_invokeai.py for CI use
Added a --default_only argument that limits model downloads to the single
default model, for use in continuous integration.

New behavior

         - switch -
    --yes      --default_only           Behavior
    -----      --------------           --------

   <not set>     <not set>              interactive download

   --yes         <not set>              non-interactively download all
                                          recommended models

   --yes        --default_only          non-interactively download the
                                          default model
2023-01-17 12:45:04 -05:00
Lincoln Stein
fc2098834d support direct loading of .safetensors models
- Small fix to allow ckpt files with the .safetensors suffix
  to be directly loaded, rather than undergo a conversion step
  first.
2023-01-17 08:11:19 -05:00
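A sketch of the suffix-based dispatch (the helper name is hypothetical; `safetensors.torch.load_file` is the library call that makes the conversion step unnecessary):

```
import torch
from safetensors.torch import load_file

def load_ckpt_weights(path: str) -> dict:
    """Load checkpoint weights directly, without a conversion step first."""
    if path.endswith(".safetensors"):
        return load_file(path, device="cpu")
    return torch.load(path, map_location="cpu")
```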
Lincoln Stein
8a31e5c5e3 allow safetensors models to be imported 2023-01-17 00:18:09 -05:00
Lincoln Stein
bcc0110c59 Merge branch 'lstein-fix-autocast' of github.com:invoke-ai/InvokeAI into lstein-fix-autocast 2023-01-16 23:18:54 -05:00
Lincoln Stein
ce1c5e70b8 fix autocast dependency in cross_attention_control 2023-01-16 23:18:43 -05:00
Lincoln Stein
ce00c9856f fix perlin noise and txt2img2img 2023-01-16 22:50:13 -05:00
Lincoln Stein
7e8f364d8d do not use autocast for diffusers
- All tensors in diffusers code path are now set explicitly to
  float32 or float16, depending on the --precision flag.
- autocast is still used in the ckpt path, since it is being
  deprecated.
2023-01-16 19:32:06 -05:00
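The explicit-dtype approach, sketched (the flag value strings are assumptions):

```
import torch

def torch_dtype(precision: str) -> torch.dtype:
    # Map the --precision flag to a fixed dtype rather than wrapping the
    # diffusers code path in torch.autocast.
    return torch.float16 if precision == "float16" else torch.float32

# e.g.: StableDiffusionPipeline.from_pretrained(repo, torch_dtype=torch_dtype("float16"))
```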
Lincoln Stein
088cd2c4dd further tweaks to model management
- Work around a problem with OmegaConf.update() that prevented model names
  from containing periods.
- Fix a logic bug in !delete_model that didn't check for the existence of the
  model in the config file.
2023-01-16 17:11:59 -05:00
Lincoln Stein
9460763eff Merge branch 'main' into lstein-improve-migration 2023-01-16 16:47:08 -05:00
Lincoln Stein
fe46d9d0f7 Merge branch 'main' into patch-1 2023-01-16 16:46:46 -05:00
Damian Stewart
563196bd03 pass step count and step index to diffusion step func (#2342) 2023-01-16 19:56:54 +00:00
Lincoln Stein
d2a038200c Merge branch 'main' into lstein-improve-migration 2023-01-16 14:22:13 -05:00
Lincoln Stein
d6ac0eeffd make SD-1.5 the default again 2023-01-16 14:21:34 -05:00
Lincoln Stein
3a1724652e upgrade requirements to CUDA 11.7, torch 1.13 (#2331)
* upgrade requirements to CUDA 11.7, torch 1.13

* fix ROCm version number

Co-authored-by: Lincoln Stein <lstein@gmail.com>
2023-01-16 14:19:27 -05:00
Lincoln Stein
8c073a7818 Merge branch 'main' into patch-1 2023-01-16 08:38:14 -05:00
Lincoln Stein
8c94f6a234 Merge branch 'main' into patch-1 2023-01-16 08:35:25 -05:00
Lincoln Stein
5fa8f8be43 Merge branch 'main' into lstein-improve-migration 2023-01-16 08:33:20 -05:00
Daya Adianto
5b35fa53a7 Improve readability of the manual installation documentation (#2296)
* docs: Fix links to pip and Conda installation methods

* docs: Improve installation script readability

This commit adds a space between the `-m` option and the module name.

* docs: Fix alignments of step 4 & 9 in `pip` installation method

* docs: Rewrite step 10 of the `pip` installation method

Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
2023-01-15 22:37:02 +00:00
Lincoln Stein
a2ee32f57f Merge branch 'main' into lstein-improve-ti-frontend 2023-01-15 17:12:50 -05:00
Brian Racer
4486169a83 pin dnspython version (#2327)
Fixes dns-related errors that began January 14, 2023
2023-01-15 17:08:45 -05:00
Lincoln Stein
bfeafa8d5e improve UI of textual inversion frontend
- File selection box now accepts directories that don't exist yet.
- Fixed crash when resume is selected and no files are available to resume from.
2023-01-15 17:04:14 -05:00
Lincoln Stein
f86c8b043c further improvements to initial load
- Migration process will not crash if duplicate model files are found,
  one in legacy location and the other in new location.
  The model in the legacy location will be deleted in this case.

- Added a hint to stable-diffusion-2.1 telling people it will work best
  with 768 pixel images.

- Added the anything-4.0 model.
2023-01-15 15:08:59 -05:00
Lincoln Stein
251a409087 adjust initial model defaults (#2322)
- Default to SD 1.5
- Add waifu diffusion 1.4
2023-01-15 15:18:41 +00:00
Kevin Turner
6fdbc1978d use 🧨diffusers model (#1583)
* initial commit of DiffusionPipeline class

* spike: proof of concept using diffusers for txt2img

* doc: type hints for Generator

* refactor(model_cache): factor out load_ckpt

* model_cache: add ability to load a diffusers model pipeline

and update associated things in Generate & Generator to not instantly fail when that happens

* model_cache: fix model default image dimensions

* txt2img: support switching diffusers schedulers

* diffusers: let the scheduler do its scaling of the initial latents

Remove IPNDM scheduler; it is not behaving.

* web server: update image_progress callback for diffusers data

* diffusers: restore prompt weighting feature

* diffusers: fix set-sampler error following model switch

* diffusers: use InvokeAIDiffuserComponent for conditioning

* cross_attention_control: stub (no-op) implementations for diffusers

* model_cache: let offload_model work with DiffusionPipeline, sorta.

* models.yaml.example: add diffusers-format model, set as default

* test-invoke-conda: use diffusers-format model
test-invoke-conda: put huggingface-token where the library can use it

* environment-mac: upgrade to diffusers 0.7 (from 0.6)

this was already done for linux; mac must have been lost in the merge.

* preload_models: explicitly load diffusers models

In non-interactive mode too, as long as you're logged in.

* fix(model_cache): don't check `model.config` in diffusers format

clean-up from recent merge.

* diffusers integration: support img2img

* dev: upgrade to diffusers 0.8 (from 0.7.1)

We get to remove some code by using methods that were factored out in the base class.

* refactor: remove backported img2img.get_timesteps

now that we can use it directly from diffusers 0.8.1

* ci: use diffusers model

* dev: upgrade to diffusers 0.9 (from 0.8.1)

* lint: correct annotations for Python 3.9.

* lint: correct AttributeError.name reference for Python 3.9.

* CI: prefer diffusers-1.4 because it no longer requires a token

The RunwayML models still do.

* build: there's yet another place to update requirements?

* configure: try to download models even without token

Models in the CompVis and stabilityai repos no longer require them. (But runwayml still does.)

* configure: add troubleshooting info for config-not-found

* fix(configure): prepend root to config path

* fix(configure): remove second `default: true` from models example

* CI: simplify test-on-push logic now that we don't need secrets

The "test on push but only in forks" logic was only necessary when tests didn't work for PRs-from-forks.

* create an embedding_manager for diffusers

* internal: avoid importing diffusers DummyObject

see https://github.com/huggingface/diffusers/issues/1479

* fix "config attributes…not expected" diffusers warnings.

* fix deprecated scheduler construction

* work around an apparent MPS torch bug that causes conditioning to have no effect

* 🚧 post-rebase repair

* preliminary support for outpainting (no masking yet)

* monkey-patch diffusers.attention and use Invoke lowvram code

* add always_use_cpu arg to bypass MPS

* add cross-attention control support to diffusers (fails on MPS)

For unknown reasons MPS produces garbage output with .swap(). Use
--always_use_cpu arg to invoke.py for now to test this code on MPS.

* diffusers support for the inpainting model

* fix debug_image to not crash with non-RGB images.

* inpainting for the normal model [WIP]

This seems to be performing well until the LAST STEP, at which point it dissolves to confetti.

* fix off-by-one bug in cross-attention-control (#1774)

Prompt token sequences begin with a "beginning-of-sequence" marker <bos> and end with a repeated "end-of-sequence" marker <eos>, making the default prompt length <bos> + 75 prompt tokens + <eos>. The .swap() code was failing to take the column for <bos> at index 0 into account. The changes here do that, and also add extra handling for a single <eos> (which may be redundant but is included for completeness).

Based on my understanding and some assumptions about how this all works, the reason .swap() nevertheless seemed to do the right thing, to some extent, is that over multiple steps the conditioning process in Stable Diffusion operates as a feedback loop. A change to token n-1 has flow-on effects on how the [1x4x64x64] latent tensor is modified by all the tokens after it, and, as the next step is processed, all the tokens before it as well. Intuitively, a token's conditioning effects "echo" throughout the whole length of the prompt. So even though the token at n-1 was being edited when what the user actually wanted was to edit the token at n, it still had some non-negligible effect, in roughly the right direction, often enough that it seemed like it was working properly.

* refactor common CrossAttention stuff into a mixin so that the old ldm code can still work if necessary

* inpainting for the normal model. I think it works this time.

* diffusers: reset num_vectors_per_token

sync with 44a0055571

* diffusers: txt2img2img (hires_fix)

with so much slicing and dicing of pipeline methods to stitch them together

* refactor(diffusers): reduce some code duplication amongst the different tasks

* fixup! refactor(diffusers): reduce some code duplication amongst the different tasks

* diffusers: enable DPMSolver++ scheduler

* diffusers: upgrade to diffusers 0.10, add Heun scheduler

* diffusers(ModelCache): stopgap to make from_cpu compatible with diffusers

* CI: default to diffusers-1.5 now that runwayml token requirement is gone

* diffusers: update to 0.10 (and transformers to 4.25)

* diffusers: use xformers when available

diffusers no longer auto-enables this as of 0.10.2.

* diffusers: make masked img2img behave better with multi-step schedulers

re-randomizing the noise each step was confusing them.

* diffusers: work more better with more models.

fixed relative path problem with local models.

fixed models on hub not always having a `fp16` branch.

* diffusers: stopgap fix for attention_maps_callback crash after recent merge

* fixup import merge conflicts

correction for 061c5369a2

* test: add tests/inpainting inputs for masked img2img

* diffusers(AddsMaskedGuidance): partial fix for k-schedulers

Prevents them from crashing, but results are still hot garbage.

* fix --safety_checker arg parsing

and add note to diffusers loader about where safety checker gets called

* generate: fix import error

* CI: don't try to read the old init location

* diffusers: support loading an alternate VAE

* CI: remove sh-syntax if-statement so it doesn't crash powershell

* CI: fold strings in yaml because backslash is not line-continuation in powershell

* attention maps callback stuff for diffusers

* build: fix syntax error in environment-mac

* diffusers: add INITIAL_MODELS with diffusers-compatible repos

* re-enable the embedding manager; closes #1778

* Squashed commit of the following:

commit e4a956abc37fcb5cf188388b76b617bc5c8fda7d
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 15:43:07 2022 +0100

    import new load handling from EmbeddingManager and cleanup

commit c4abe91a5ba0d415b45bf734068385668b7a66e6
Merge: 032e856e 1efc6397
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 15:09:53 2022 +0100

    Merge branch 'feature_textual_inversion_mgr' into dev/diffusers_with_textual_inversion_manager

commit 032e856eefb3bbc39534f5daafd25764bcfcef8b
Merge: 8b4f0fe9 bc515e24
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 15:08:01 2022 +0100

    Merge remote-tracking branch 'upstream/dev/diffusers' into dev/diffusers_with_textual_inversion_manager

commit 1efc6397fc6e61c1aff4b0258b93089d61de5955
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 15:04:28 2022 +0100

    cleanup and add performance notes

commit e400f804ac471a0ca2ba432fd658778b20c7bdab
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 14:45:07 2022 +0100

    fix bug and update unit tests

commit deb9ae0ae1016750e93ce8275734061f7285a231
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 14:28:29 2022 +0100

    textual inversion manager seems to work

commit 162e02505dec777e91a983c4d0fb52e950d25ff0
Merge: cbad4583 12769b3d
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 11:58:03 2022 +0100

    Merge branch 'main' into feature_textual_inversion_mgr

commit cbad45836c6aace6871a90f2621a953f49433131
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 11:54:10 2022 +0100

    use position embeddings

commit 070344c69b0e0db340a183857d0a787b348681d3
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 11:53:47 2022 +0100

    Don't crash CLI on exceptions

commit b035ac8c6772dfd9ba41b8eeb9103181cda028f8
Author: Damian Stewart <d@damianstewart.com>
Date:   Sun Dec 18 11:11:55 2022 +0100

    add missing position_embeddings

commit 12769b3d3562ef71e0f54946b532ad077e10043c
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 13:33:25 2022 +0100

    debugging why it don't work

commit bafb7215eabe1515ca5e8388fd3bb2f3ac5362cf
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 13:21:33 2022 +0100

    debugging why it don't work

commit 664a6e9e14
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 12:48:38 2022 +0100

    use TextualInversionManager in place of embeddings (wip, doesn't work)

commit 8b4f0fe9d6e4e2643b36dfa27864294785d7ba4e
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 12:48:38 2022 +0100

    use TextualInversionManager in place of embeddings (wip, doesn't work)

commit ffbe1ab11163ba712e353d89404e301d0e0c6cdf
Merge: 6e4dad60 023df37e
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 02:37:31 2022 +0100

    Merge branch 'feature_textual_inversion_mgr' into dev/diffusers

commit 023df37eff
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 02:36:54 2022 +0100

    cleanup

commit 05fac594ea
Author: Damian Stewart <d@damianstewart.com>
Date:   Fri Dec 16 02:07:49 2022 +0100

    tweak error checking

commit 009f32ed39
Author: damian <null@damianstewart.com>
Date:   Thu Dec 15 21:29:47 2022 +0100

    unit tests passing for embeddings with vector length >1

commit beb1b08d9a
Author: Damian Stewart <d@damianstewart.com>
Date:   Thu Dec 15 13:39:09 2022 +0100

    more explicit equality tests when overwriting

commit 44d8a5a7c8
Author: Damian Stewart <d@damianstewart.com>
Date:   Thu Dec 15 13:30:13 2022 +0100

    wip textual inversion manager (unit tests passing for 1v embedding overwriting)

commit 417c2b57d9
Author: Damian Stewart <d@damianstewart.com>
Date:   Thu Dec 15 12:30:55 2022 +0100

    wip textual inversion manager (unit tests passing for base stuff + padding)

commit 2e80872e3b
Author: Damian Stewart <d@damianstewart.com>
Date:   Thu Dec 15 10:57:57 2022 +0100

    wip new TextualInversionManager

* stop using WeightedFrozenCLIPEmbedder

* store diffusion models locally

- configure_invokeai.py reconfigured to store diffusion models rather than
  CompVis models
- the Hugging Face caching mechanism is used, but the cache is set to ~/invokeai/models/repo_id
- models.yaml does **NOT** use path, just repo_id
- "repo_name" changed to "repo_id" to follow Hugging Face conventions
- Models are loaded with full precision pending further work.

* allow non-local files during development

* path takes priority over repo_id

* MVP for model_cache and configure_invokeai

- Feature complete (almost)

- configure_invokeai.py downloads both .ckpt and diffuser models,
  along with their VAEs. Both types of download are controlled by
  a unified INITIAL_MODELS.yaml file.

- model_cache can load both type of model and switches back and forth
  in CPU. No memory leaks detected

TO DO:

  1. I have not yet turned on the LocalOnly flag for diffuser models, so
     the code will check the Hugging Face repo for updates before using the
     locally cached models. This will break firewalled systems. I am thinking
     of putting in a global check for internet connectivity at startup time
     and setting the LocalOnly flag based on this. It would be good to check
     updates if there is connectivity.

  2. I have not gone completely through INITIAL_MODELS.yaml to check which
     models are available as diffusers and which are not. So models like
     PaperCut and VoxelArt may not load properly. The runway and stability
     models are checked, as well as the Trinart models.

  3. Add stanzas for SD 2.0 and 2.1 in INITIAL_MODELS.yaml

REMAINING PROBLEMS NOT DIRECTLY RELATED TO MODEL_CACHE:

  1. When loading a .ckpt file there are lots of messages like this:

     Warning! ldm.modules.attention.CrossAttention is no longer being
     maintained. Please use InvokeAICrossAttention instead.

     I'm not sure how to address this.

  2. The ckpt models ***don't actually run*** due to the lack of special-case
     support for them in the generator objects. For example, here's the hard
     crash you get when you run txt2img against the legacy waifu-diffusion-1.3
     model:
```
     >> An error occurred:
     Traceback (most recent call last):
       File "/data/lstein/InvokeAI/ldm/invoke/CLI.py", line 140, in main
           main_loop(gen, opt)
      File "/data/lstein/InvokeAI/ldm/invoke/CLI.py", line 371, in main_loop
         gen.prompt2image(
      File "/data/lstein/InvokeAI/ldm/generate.py", line 496, in prompt2image
	 results = generator.generate(
      File "/data/lstein/InvokeAI/ldm/invoke/generator/base.py", line 108, in generate
         image = make_image(x_T)
      File "/data/lstein/InvokeAI/ldm/invoke/generator/txt2img.py", line 33, in make_image
         pipeline_output = pipeline.image_from_embeddings(
      File "/home/lstein/invokeai/.venv/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1265, in __getattr__
         raise AttributeError("'{}' object has no attribute '{}'".format(
     AttributeError: 'LatentDiffusion' object has no attribute 'image_from_embeddings'
```

  3. The inpainting diffusion model isn't working. Here's the output of "banana
     sushi" when inpainting-1.5 is loaded:

```
    Traceback (most recent call last):
      File "/data/lstein/InvokeAI/ldm/generate.py", line 496, in prompt2image
        results = generator.generate(
      File "/data/lstein/InvokeAI/ldm/invoke/generator/base.py", line 108, in generate
        image = make_image(x_T)
      File "/data/lstein/InvokeAI/ldm/invoke/generator/txt2img.py", line 33, in make_image
        pipeline_output = pipeline.image_from_embeddings(
      File "/data/lstein/InvokeAI/ldm/invoke/generator/diffusers_pipeline.py", line 301, in image_from_embeddings
        result_latents, result_attention_map_saver = self.latents_from_embeddings(
      File "/data/lstein/InvokeAI/ldm/invoke/generator/diffusers_pipeline.py", line 330, in latents_from_embeddings
        result: PipelineIntermediateState = infer_latents_from_embeddings(
      File "/data/lstein/InvokeAI/ldm/invoke/generator/diffusers_pipeline.py", line 185, in __call__
        for result in self.generator_method(*args, **kwargs):
      File "/data/lstein/InvokeAI/ldm/invoke/generator/diffusers_pipeline.py", line 367, in generate_latents_from_embeddings
        step_output = self.step(batched_t, latents, guidance_scale,
      File "/home/lstein/invokeai/.venv/lib/python3.9/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
        return func(*args, **kwargs)
      File "/data/lstein/InvokeAI/ldm/invoke/generator/diffusers_pipeline.py", line 409, in step
        step_output = self.scheduler.step(noise_pred, timestep, latents, **extra_step_kwargs)
      File "/home/lstein/invokeai/.venv/lib/python3.9/site-packages/diffusers/schedulers/scheduling_lms_discrete.py", line 223, in step
        pred_original_sample = sample - sigma * model_output
    RuntimeError: The size of tensor a (9) must match the size of tensor b (4) at non-singleton dimension 1
```

* proper support for float32/float16

- configure script now correctly detects the user's preference for
  fp16/32 and downloads the correct diffusers version. If the fp16
  version is not available, it falls back to fp32 (see the sketch below).

- misc code cleanup and simplification in model_cache
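
A minimal sketch of the fp16-with-fallback logic described above (the helper
name and the use of OSError as the failure signal are assumptions, not the
project's actual code):

```
import torch
from diffusers import StableDiffusionPipeline

def load_pipeline(repo_id: str, want_fp16: bool) -> StableDiffusionPipeline:
    # Try the fp16 branch first; many repos publish one under revision="fp16".
    if want_fp16:
        try:
            return StableDiffusionPipeline.from_pretrained(
                repo_id, revision="fp16", torch_dtype=torch.float16
            )
        except OSError:
            # Assumption: a missing fp16 revision surfaces here; fall through.
            pass
    # Fall back to the full-precision weights.
    return StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float32)
```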

* add on-the-fly conversion of .ckpt to diffusers models

1. On-the-fly conversion code can be found in the file ldm/invoke/ckpt_to_diffusers.py.

2. A new !optimize command has been added to the CLI. It should be ported to the Web GUI.

The user experience on the CLI looks like this:

```
invoke> !optimize /home/lstein/invokeai/models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
INFO: Converting legacy weights file /home/lstein/invokeai/models/ldm/stable-diffusion-v1/sd-v1-4.ckpt to optimized diffuser model.
      This operation will take 30-60s to complete.
Success. Optimized model is now located at /home/lstein/tmp/invokeai/models/optimized-ckpts/sd-v1-4
Writing new config file entry for sd-v1-4...

>> New configuration:
sd-v1-4:
  description: Optimized version of sd-v1-4
  format: diffusers
  path: /home/lstein/tmp/invokeai/models/optimized-ckpts/sd-v1-4

OK to import [n]? y
>> Verifying that new model loads...
>> Current VRAM usage:  2.60G
>> Offloading stable-diffusion-2.1 to CPU
>> Loading diffusers model from /home/lstein/tmp/invokeai/models/optimized-ckpts/sd-v1-4
  | Using faster float16 precision
You have disabled the safety checker for <class 'ldm.invoke.generator.diffusers_pipeline.StableDiffusionGeneratorPipeline'> by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion
license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances,
disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .
  | training width x height = (512 x 512)
>> Model loaded in 3.48s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Textual inversions available:
>> Setting Sampler to k_lms (LMSDiscreteScheduler)
Keep model loaded? [y]
```

* add parallel set of generator files for ckpt legacy generation

* generation using legacy ckpt models now working

* diffusers: fix missing attention_maps_callback

fix for 23eb80b404

* associate legacy CrossAttention with .ckpt models

* enable autoconvert

New --autoconvert CLI option will scan a designated directory for
new .ckpt files, convert them into diffusers models, and import
them into models.yaml.

Works like this:

   invoke.py --autoconvert /path/to/weights/directory

Two new methods were added to ModelCache:

  autoconvert_weights(config_path, weights_directory_path, models_directory_path)
  convert_and_import(ckpt_path, diffuser_path)
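
A rough sketch of what the scan-and-convert loop might look like (the
optimized-ckpts destination mirrors the transcript above; the stub stands in
for the real convert_and_import(), and the skip-if-already-converted check is
an assumption):

```
from pathlib import Path

def convert_and_import(ckpt_path: Path, diffuser_path: Path) -> None:
    # Stub standing in for ModelCache.convert_and_import() so the sketch runs.
    print(f"converting {ckpt_path} -> {diffuser_path}")

def autoconvert_weights(config_path: Path, weights_dir: Path, models_dir: Path) -> None:
    # config_path would be used to append the new stanzas to models.yaml.
    # Scan the designated directory and convert any .ckpt not yet imported.
    for ckpt in sorted(weights_dir.glob("*.ckpt")):
        dest = models_dir / "optimized-ckpts" / ckpt.stem
        if not dest.exists():  # assumption: an existing folder means already converted
            convert_and_import(ckpt, dest)
```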

* diffusers: update to diffusers 0.11 (from 0.10.2)

* fix vae loading & width/height calculation

* refactor: encapsulate these conditioning data into one container

* diffusers: fix some noise-scaling issues by pushing the noise-mixing down to the common function

* add support for safetensors and accelerate

* set local_files_only when internet unreachable
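
One plausible way to implement the reachability test (host, port, and timeout
are illustrative assumptions):

```
import socket

def internet_reachable(host: str = "huggingface.co", port: int = 443,
                       timeout: float = 2.0) -> bool:
    # True if we can open a TCP connection to the Hugging Face hub.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

# Fall back to the local cache when the hub cannot be reached.
local_files_only = not internet_reachable()
```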

* diffusers: fix error-handling path when model repo has no fp16 branch

* fix generatorinpaint error

Fixes:
  "ModuleNotFoundError: No module named 'ldm.invoke.generatorinpaint'"
  https://github.com/invoke-ai/InvokeAI/pull/1583#issuecomment-1363634318

* quench diffuser safety-checker warning

* diffusers: support stochastic DDIM eta parameter

* fix conda env creation on macos

* fix cross-attention with diffusers 0.11

* diffusers: the VAE needs to be set to tiling mode as well as the U-Net

* diffusers: comment on subfolders

* diffusers: embiggen!

* diffusers: make model_cache.list_models serializable

* diffusers(inpaint): restore scaling functionality

* fix requirements clash between numba and numpy 1.24

* diffusers: allow inpainting model to do non-inpainting tasks

* start expanding model_cache functionality

* add import_ckpt_model() and import_diffuser_model() methods to model_manager

- in addition, model_cache.py is now renamed to model_manager.py

* allow "recommended" flag to be optional in INITIAL_MODELS.yaml

* configure_invokeai now downloads VAE diffusers in advance

* rename ModelCache to ModelManager

* remove support for `repo_name` in models.yaml

* check for and refuse to load embeddings trained on incompatible models
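
A sketch of one possible compatibility check, comparing the embedding's vector
width against the text encoder's hidden size (768 for SD 1.x, 1024 for SD 2.x);
the file-format handling is an assumption:

```
import torch

def embedding_is_compatible(embed_path: str, text_encoder_dim: int) -> bool:
    data = torch.load(embed_path, map_location="cpu")
    # Assumption: diffusers-style learned_embeds.bin, i.e. a {token: tensor} dict.
    vector = next(iter(data.values()))
    return vector.shape[-1] == text_encoder_dim
```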

* models.yaml.example: s/repo_name/repo_id

and remove extra INITIAL_MODELS now that the main one has diffusers models in it.

* add MVP textual inversion script

* refactor(InvokeAIDiffuserComponent): factor out _combine()

* InvokeAIDiffuserComponent: implement threshold
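
The thresholding step might look roughly like this rescaling clamp (a sketch
only; the actual InvokeAIDiffuserComponent logic may differ):

```
import torch

def apply_threshold(latents: torch.Tensor, threshold: float) -> torch.Tensor:
    # A threshold of 0 means "disabled".
    if threshold <= 0:
        return latents
    maxval = latents.abs().max()
    if maxval > threshold:
        # Rescale rather than hard-clip, preserving relative magnitudes.
        latents = latents * (threshold / maxval)
    return latents
```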

* InvokeAIDiffuserComponent: diagnostic logs for threshold

...this does not look right

* add a curses-based frontend to textual inversion

- not quite working yet
- requires npyscreen to be installed
- on Windows it will also require windows-curses, but this has not been
  added to the requirements yet

* add curses-based interface for textual inversion

* fix crash in convert_and_import()

- This corrects a "local variable referenced before assignment" error
  in model_manager.convert_and_import()

* potential workaround for no 'state_dict' key error

- As reported in https://github.com/huggingface/diffusers/issues/1876

* create TI output dir if needed

* Update environment-lin-cuda.yml (#2159)

Fixed line 42 to use the proper operator for the transformers requirement: ~= instead of =~

* diffusers: update sampler-to-scheduler mapping

based on https://github.com/huggingface/diffusers/issues/277#issuecomment-1371428672
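
For illustration, a subset of such a mapping (the class names are real
diffusers schedulers, but the exact set InvokeAI ships may differ; the k_lms
entry matches the "Setting Sampler to k_lms (LMSDiscreteScheduler)" line in
the transcript above):

```
from diffusers import (
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)

# Illustrative subset: legacy sampler names mapped to diffusers scheduler classes.
SAMPLER_TO_SCHEDULER = {
    "ddim": DDIMScheduler,
    "plms": PNDMScheduler,
    "k_lms": LMSDiscreteScheduler,
    "k_euler": EulerDiscreteScheduler,
    "k_euler_a": EulerAncestralDiscreteScheduler,
}
```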

* improve user experience for ckpt to diffusers conversion

- !optimize_models command now operates on an existing ckpt file entry in models.yaml
- replaces existing entry, rather than adding a new one
- offers to delete the ckpt file after conversion

* web: adapt progress callback to deal with old generator or new diffusers pipeline

* clean-up model_manager code

- add_model() verified to work for .ckpt local paths,
  .ckpt remote URLs, diffusers local paths, and
  diffusers repo_ids

- convert_and_import() verified to work for local and
  remote .ckpt files

* handle edge cases for import_model() and convert_model()

* add support for safetensor .ckpt files
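
A minimal sketch of format-agnostic checkpoint loading (the "state_dict"
unwrapping mirrors the workaround mentioned earlier; the function name is an
assumption):

```
from pathlib import Path

import torch
from safetensors.torch import load_file

def load_ckpt_state_dict(ckpt_path: Path) -> dict:
    if ckpt_path.suffix == ".safetensors":
        return load_file(str(ckpt_path))
    weights = torch.load(str(ckpt_path), map_location="cpu")
    # Some legacy checkpoints nest their weights under a "state_dict" key.
    return weights.get("state_dict", weights)
```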

* fix name error

* code cleanup with pyflake

* improve model setting behavior

- If the user enters an invalid model name at startup time, the CLI will
  not try to load it; it warns and falls back to the default model
- CLI UI enhancement: include the currently active model in the command
  line prompt.

* update test-invoke-pip.yml
- fix model cache path to point to runwayml/stable-diffusion-v1-5
- remove `skip-sd-weights` from configure_invokeai.py args

* exclude dev/diffusers from "fail for draft PRs"

* disable "fail on PR jobs"

* re-add `--skip-sd-weights` since there is not enough disk space

* update workflow environments
- include `INVOKE_MODEL_RECONFIGURE: '--yes'`

* clean up model load failure handling

- Allow CLI to run even when no model is defined or loadable.
- Inhibit stack trace when model load fails - only show last error
- Give user *option* to run configure_invokeai.py when no models
  successfully load.
- Restart invokeai after reconfiguration.

* further edge-case handling

1) only one model in models.yaml file, and that model is broken
2) no models in models.yaml
3) models.yaml doesn't exist at all

* fix incorrect model status listing

- "cached" was not being returned from list_models()
- normalize handling of exceptions during model loading:
   - Passing an invalid model name to generate.set_model() raises
     a KeyError
   - All other failures raise the appropriate Exception

* CI: do download weights (if not already cached)

* diffusers: fix scheduler loading in offline mode

* CI: fix model name (no longer has `diffusers-` prefix)

* Update txt2img2img.py (#2256)

* fixes to share models with HuggingFace cache system

- If HF_HOME environment variable is defined, then all huggingface models
  are stored in that directory following the standard conventions.
- For seamless interoperability, set HF_HOME to ~/.cache/huggingface
- If HF_HOME not defined, then models are stored in ~/invokeai/models.
  This is equivalent to setting HF_HOME to ~/invokeai/models

A future commit will add a migration mechanism so that this change doesn't
break previous installs.

* feat - make model storage compatible with hugging face caching system

This commit alters the InvokeAI model directory to be compatible with
Hugging Face, making it easier to share diffusers (and other models)
across different programs.

- If the HF_HOME environment variable is not set, then models are
  cached in ~/invokeai/models in a format that is identical to the
  HuggingFace cache.

- If HF_HOME is set, then models are cached wherever HF_HOME points.

- To enable sharing with other HuggingFace library clients, set
  HF_HOME to ~/.cache/huggingface to set the default cache location
  or to ~/invokeai/models to have huggingface cache inside InvokeAI.
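
A sketch of the cache-root resolution described above (the helper name is an
assumption):

```
import os
from pathlib import Path

def models_cache_root() -> Path:
    hf_home = os.environ.get("HF_HOME")
    if hf_home:
        # The user opted in to a shared HuggingFace cache location.
        return Path(hf_home)
    # InvokeAI's default, laid out identically to the HuggingFace cache.
    return Path.home() / "invokeai" / "models"
```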

* fix error "no attribute CkptInpaint"

* model_manager.list_models() returns entire model config stanza+status

* Initial Draft - Model Manager Diffusers

* added hash function to diffusers

* implement sha256 hashes on diffusers models
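
One way such a hash might be computed over a diffusers folder (chunked reads
and a stable file ordering; not necessarily the project's exact scheme):

```
import hashlib
from pathlib import Path

def diffusers_model_hash(model_dir: Path) -> str:
    sha = hashlib.sha256()
    # Walk the files in a stable order so the hash is reproducible.
    for path in sorted(model_dir.rglob("*")):
        if path.is_file():
            with path.open("rb") as f:
                for chunk in iter(lambda: f.read(1 << 20), b""):
                    sha.update(chunk)
    return sha.hexdigest()
```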

* Add Model Manager Support for Diffusers

* fix various problems with model manager

- in the CLI import functions, fix "not enough values to unpack" from
  _get_name_and_desc()
- fix crash when using an old-style vae: value with a new-style diffusers model

* rebuild frontend

* fix dictconfig-not-serializable issue

* fix "'NoneType' object is not subscriptable" crash in model_manager

* fix "str has no attribute get" error in model_manager list_models()

* Add path and repo_id support for Diffusers Model Manager

Also fixes bugs

* Fix tooltip IT localization not working

* Add Version Number To WebUI

* Optimize Model Search

* Fix incorrect font on the Model Manager UI

* Fix image degradation on canvas layer merges - [Experimental]

This change should effectively fix a few things.

- Fix image degradation on subsequent merges of the canvas layers.
- Fix the slight transparent border that is left behind when filling the bounding box with a color.
- Fix the leftover line of color when filling a bounding box with color.

So far no side effects have been observed. If you find any, please report them.

* Add local model filtering for Diffusers / Checkpoints

* Go to home on modal close for the Add Modal UI

* Styling Fixes

* Model Manager Diffusers Localization Update

* Add Safe Tensor scanning to Model Manager

* Fix model edit form dispatching string values instead of numbers.

* Resolve VAE handling / edge cases for supplied repos

* defer injecting tokens for textual inversions until they're used for the first time

* squash a console warning

* implement model migration check

* add_model() overwrites previous config rather than merges

* fix model config file attribute merging

* fix precision handling in textual inversion script

* allow ckpt conversion script to work with safetensors .ckpts

Applied patch here:
beb932c5d1

* fix name "args" is not defined crash in textual_inversion_training

* fix a second NameError: name 'args' is not defined crash

* fix loading of the safety checker from the global cache dir

* add installation step to textual inversion frontend

- After a successful training run, the script will copy learned_embeds.bin
  to a subfolder of the embeddings directory.
- The user is given the option to delete the logs and intermediate checkpoints
  (which together use 7-8G of space).
- If textual inversion training fails, the error is reported gracefully.

* don't crash out on incompatible embeddings

- put try: blocks around places where the system tries to load an embedding
  which is incompatible with the currently loaded model

* add support for checkpoint resuming

* textual inversion preferences are saved and restored between sessions

- Preferences are stored in a file named text-inversion-training/preferences.conf
- Currently the resume-from-checkpoint option is not working correctly. Possible
  bug in textual_inversion_training.py?

* copy learned_embeds.bin into the right location

* add front end for diffusers model merging

- The front end doesn't do anything yet!
- Changed model name parsing in the CLI to support merged models with
  the "+" character in their names.

* improve inpainting experience

- recommend ckpt version of inpainting-1.5 to user
- fix get_noise() bug in ckpt version of omnibus.py

* update environment*yml

* tweak instructions to install HuggingFace token

* bump version number

* enhance update scripts

- the update scripts will now fetch the new INITIAL_MODELS.yaml so that
  configure_invokeai.py knows about the diffusers versions.

* enhance invoke.sh/invoke.bat launchers

- added configure_invokeai.py to menu
- menu defaults to browser-based invoke

* remove conda workflow (#2321)

* fix `token_ids has shape torch.Size([79]) - expected [77]`

* update CHANGELOG.md with 2.3.* info

- Add information on how formats have changed and the upgrade process.
- Add short bug list.

Co-authored-by: Damian Stewart <d@damianstewart.com>
Co-authored-by: Damian Stewart <null@damianstewart.com>
Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
Co-authored-by: Wybartel-luxmc <37852506+Wybartel-luxmc@users.noreply.github.com>
Co-authored-by: mauwii <Mauwii@outlook.de>
Co-authored-by: mickr777 <115216705+mickr777@users.noreply.github.com>
Co-authored-by: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Co-authored-by: Eugene Brodsky <ebr@users.noreply.github.com>
Co-authored-by: Matthias Wild <40327258+mauwii@users.noreply.github.com>
2023-01-15 09:22:46 -05:00
Lincoln Stein
c855d2a350 Consolidate version numbers (#2201)
* update version number

* print version number at startup

* move version number into ldm/invoke/_version.py

* bump version to 2.2.6+a0

* handle whitespace better

* resolve issues raised by mauwii during PR review
2023-01-15 04:07:21 +01:00
Kent Keirsey
4dd74cdc68 update Readme (#2278)
* Update Readme & Assets

* Update Canvas Assets

* Updated Readme to correct missing refs

* Correcting refs

* Updating Canvas Preview size

Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
2023-01-15 01:13:11 +00:00
Lincoln Stein
746e97ea1d enhance the installer (#2299)
1. create_installers.sh now asks before tagging and committing the
   current repo
2. trailing whitespace removed from user-provided location of invokeai
   directory in install.bat
2023-01-14 19:28:14 -05:00
gogurtenjoyer
241313c4a6 Update automated install doc - link to MS C libs
Updated the link for the MS Visual C libraries - I'm not sure if MS changed the location of the files but this new one leads right to the file downloads.
2023-01-12 14:09:35 -08:00
Edward Johan
b6d1a17a1e tip fix 2023-01-09 23:53:55 +06:00
Lincoln Stein
c73434c2a3 tweak install instructions (#2227)
- Removed links from the install instructions to the installer zip files.
- Replaced "2.2.4" with "2.X.X" globally, to avoid the docs going out of
  date.
2023-01-09 00:12:41 +00:00
Matthias Wild
69b15024a9 update python requirements (#2251)
since torch versions <1.13.1 have a critical security issue
2023-01-08 07:44:03 -05:00
William Chong
26e413ae9c Require huggingface-hub version 0.11.1 (#2222)
`import login` only works in huggingface-hub >= 0.11.0

Fixes https://github.com/invoke-ai/InvokeAI/issues/2149
2023-01-04 22:21:48 +00:00
Chris Dawson
91eb84c5d9 Allow multiple CORS origins (#2031)
* Permit cmd override for CORS modification

* Enable multiple origins for CORS

* Remove CMD_OVERRIDE

* Revert executable bit change

* Defensively convert list into string

* Bad if statement

* Retry rebase

* Retry rebase

Co-authored-by: Chris Dawson <chris@vivoh.com>
2023-01-04 14:26:42 -05:00
Lincoln Stein
5d69bd408b fix facexlib weights being downloaded to .venv (#2221)
- fix problem of facexlib weights being downloaded into the .venv
  package directory when codeformer restoration is requested.
- now uses the pre-downloaded weights in ~/invokeai/models/gfpgan/weights
  (which are shared with gfpgan)

Co-authored-by: Matthias Wild <40327258+mauwii@users.noreply.github.com>
2023-01-04 14:22:49 -05:00
Minjune Song
21bf512056 Local embeddings support (CLI autocomplete) (#2211)
* integrate local embeds with HF embeds

* Update concepts_lib.py

* Update concepts_lib.py

Co-authored-by: BuildTools <unconfigured@null.spigotmc.org>
Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
2023-01-04 06:22:10 +00:00
Lincoln Stein
6c6e534c1a fix codeformer facexlib files being downloaded into .venv
- Fixed codeformer module so that the facexlib files are downloaded
  into their pre-stored location in models/gfpgan/weights (shared
  with the GFPGAN module)
2023-01-04 00:13:33 -05:00
Name
010378153f spelling mistake fixed
wil -> will
2023-01-04 05:48:18 +13:00
Jeremy Clark
9091b6e24a Explicitly call python found in system (#2203)
Explicitly calls the python bin found in the system instead of calling `python` which may fail on systems where python is installed as `python3`
2023-01-02 13:47:01 +00:00
Matthias Wild
64700b07a8 fixing a typo in invoke.py (#2204) 2023-01-02 02:39:43 +00:00
Matthias Wild
34f8117241 Fix patchmatch-docs (#2111)
* use `uname -m` instead of `arch`
addressing #2105

* fix install patchmatch formating

* fix 2 broken links

* remove instruction to do develop install of patchmatch

Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
2023-01-01 20:52:05 +00:00
848 changed files with 30477 additions and 10560 deletions

@@ -1,19 +1,23 @@
# use this file as a whitelist
*
!backend
!environments-and-requirements
!frontend
!invokeai
!ldm
!main.py
!scripts
!server
!static
!setup.py
!pyproject.toml
!README.md
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml
# ignore frontend but whitelist dist
invokeai/frontend/**
!invokeai/frontend/dist
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets
!invokeai/assets/web
# ignore python cache
**/__pycache__
**/*.py[cod]
**/*.egg-info

.github/CODEOWNERS

@@ -1,7 +1,50 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @tildebyte
.github/workflows/ @mauwii
docker_build/ @mauwii
# continuous integration
/.github/workflows/ @mauwii
# documentation
/docs/ @lstein @mauwii @tildebyte
mkdocs.yml @lstein @mauwii
# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein
/installer/ @ebr @lstein @tildebyte
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @ebr
invokeai/configs @lstein @ebr
/ldm/invoke/_version.py @lstein @blessedcoolant
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious
# generation and model management
/ldm/*.py @lstein
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein
/ldm/invoke/ckpt_generator @lstein
/ldm/invoke/CLI.py @lstein
/ldm/invoke/config @lstein @ebr @mauwii
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein
/ldm/invoke/patchmatch.py @Kyle0654
/ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn
/ldm/modules @damian0815 @keturn
# Nodes
apps/ @Kyle0654
# legacy REST API
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb
/ldm/invoke/server_legacy.py @CapableWeb
/scripts/legacy_api.py @CapableWeb
/tests/legacy_tests.sh @CapableWeb

@@ -1,87 +0,0 @@
name: Build and push cloud image
on:
workflow_dispatch:
# push:
# branches:
# - main
# tags:
# - v*
# # we will NOT push the image on pull requests, only test buildability.
# pull_request:
# branches:
# - main
permissions:
contents: read
packages: write
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
docker:
strategy:
fail-fast: false
matrix:
arch:
- x86_64
# requires resolving a patchmatch issue
# - aarch64
runs-on: ubuntu-latest
name: ${{ matrix.arch }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: matrix.arch == 'aarch64'
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# see https://github.com/docker/metadata-action
# will push the following tags:
# :edge
# :main (+ any other branches enabled in the workflow)
# :<tag>
# :1.2.3 (for semver tags)
# :1.2 (for semver tags)
# :<sha>
tags: |
type=edge,branch=main
type=ref,event=branch
type=ref,event=tag
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha
# suffix image tags with architecture
flavor: |
latest=auto
suffix=-${{ matrix.arch }},latest=true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# do not login to container registry on PRs
- if: github.event_name != 'pull_request'
name: Docker login
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push cloud image
uses: docker/build-push-action@v3
with:
context: .
file: docker-build/Dockerfile.cloud
platforms: Linux/${{ matrix.arch }}
# do not push the image on PRs
push: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

@@ -3,67 +3,73 @@ on:
push:
branches:
- 'main'
- 'update/ci/*'
tags:
- 'v*.*.*'
jobs:
docker:
if: github.event.pull_request.draft == false
strategy:
fail-fast: false
matrix:
registry:
- ghcr.io
flavor:
- amd
- cuda
# - cloud
- cpu
include:
- flavor: amd
pip-requirements: requirements-lin-amd.txt
dockerfile: docker-build/Dockerfile
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cuda
pip-requirements: requirements-lin-cuda.txt
dockerfile: docker-build/Dockerfile
pip-extra-index-url: ''
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
# - flavor: cloud
# pip-requirements: requirements-lin-cuda.txt
# dockerfile: docker-build/Dockerfile.cloud
# platforms: linux/amd64
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
github-token: ${{ secrets.GITHUB_TOKEN }}
images: ghcr.io/${{ github.repository }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha
type=semver,pattern={{major}}
type=sha,enable=true,prefix=sha-,format=short
flavor: |
latest=true
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ matrix.platforms }}
- if: github.event_name != 'pull_request'
name: Docker login
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
registry: ${{ matrix.registry }}
username: ${{ github.actor }}
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build container
uses: docker/build-push-action@v3
uses: docker/build-push-action@v4
with:
context: .
file: ${{ matrix.dockerfile }}
@@ -71,4 +77,16 @@ jobs:
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: pip_requirements=${{ matrix.pip-requirements }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Output image, digest and metadata to summary
run: |
{
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
echo digest: "${{ steps.docker_build.outputs.digest }}"
echo labels: "${{ steps.meta.outputs.labels }}"
echo tags: "${{ steps.meta.outputs.tags }}"
echo version: "${{ steps.meta.outputs.version }}"
} >> "$GITHUB_STEP_SUMMARY"

.github/workflows/clean-caches.yml

@@ -0,0 +1,34 @@
name: cleanup caches by a branch
on:
pull_request:
types:
- closed
workflow_dispatch:
jobs:
cleanup:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Cleanup
run: |
gh extension install actions/gh-actions-cache
REPO=${{ github.repository }}
BRANCH=${{ github.ref }}
echo "Fetching list of cache key"
cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 )
## Setting this to not fail the workflow while deleting cache keys.
set +e
echo "Deleting caches..."
for cacheKey in $cacheKeysForPR
do
gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
done
echo "Done"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -3,17 +3,18 @@ name: Lint frontend
on:
pull_request:
paths:
- 'frontend/**'
- 'invokeai/frontend/**'
push:
paths:
- 'frontend/**'
- 'invokeai/frontend/**'
defaults:
run:
working-directory: frontend
working-directory: invokeai/frontend
jobs:
lint-frontend:
if: github.event.pull_request.draft == false
runs-on: ubuntu-22.04
steps:
- name: Setup Node 18

@@ -7,6 +7,7 @@ on:
jobs:
mkdocs-material:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: checkout sources

@@ -9,6 +9,7 @@ on:
jobs:
pyflakes:
name: runner / pyflakes
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2

.github/workflows/pypi-release.yml

@@ -0,0 +1,41 @@
name: PyPI Release
on:
push:
paths:
- 'ldm/invoke/_version.py'
workflow_dispatch:
jobs:
release:
if: github.repository == 'invoke-ai/InvokeAI'
runs-on: ubuntu-22.04
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
TWINE_NON_INTERACTIVE: 1
steps:
- name: checkout sources
uses: actions/checkout@v3
- name: install deps
run: pip install --upgrade build twine
- name: build package
run: python3 -m build
- name: check distribution
run: twine check dist/*
- name: check PyPI versions
if: github.ref == 'refs/heads/main'
run: |
pip install --upgrade requests
python -c "\
import scripts.pypi_helper; \
EXISTS=scripts.pypi_helper.local_on_pypi(); \
print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
- name: upload package
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
run: twine upload dist/*

@@ -1,161 +0,0 @@
name: Test invoke.py
on:
push:
branches:
- 'main'
pull_request:
branches:
- 'main'
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
- 'converted_to_draft'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
fail_if_pull_request_is_draft:
if: github.event.pull_request.draft == true
runs-on: ubuntu-22.04
steps:
- name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
run: exit 1
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
stable-diffusion-model:
- 'stable-diffusion-1.5'
environment-yaml:
- environment-lin-amd.yml
- environment-lin-cuda.yml
- environment-mac.yml
- environment-win-cuda.yml
include:
- environment-yaml: environment-lin-amd.yml
os: ubuntu-22.04
curl-command: curl
github-env: $GITHUB_ENV
default-shell: bash -l {0}
- environment-yaml: environment-lin-cuda.yml
os: ubuntu-22.04
curl-command: curl
github-env: $GITHUB_ENV
default-shell: bash -l {0}
- environment-yaml: environment-mac.yml
os: macos-12
curl-command: curl
github-env: $GITHUB_ENV
default-shell: bash -l {0}
- environment-yaml: environment-win-cuda.yml
os: windows-2022
curl-command: curl.exe
github-env: $env:GITHUB_ENV
default-shell: pwsh
- stable-diffusion-model: stable-diffusion-1.5
stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
name: ${{ matrix.environment-yaml }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
env:
CONDA_ENV_NAME: invokeai
INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
defaults:
run:
shell: ${{ matrix.default-shell }}
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: create models.yaml from example
run: |
mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
- name: create environment.yml
run: cp "environments-and-requirements/${{ matrix.environment-yaml }}" environment.yml
- name: Use cached conda packages
id: use-cached-conda-packages
uses: actions/cache@v3
with:
path: ~/conda_pkgs_dir
key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-yaml) }}
- name: Activate Conda Env
id: activate-conda-env
uses: conda-incubator/setup-miniconda@v2
with:
activate-environment: ${{ env.CONDA_ENV_NAME }}
environment-file: environment.yml
miniconda-version: latest
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to development branch validation
if: ${{ github.ref == 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to Pull Request validation
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: Use Cached Stable Diffusion Model
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: cache-${{ matrix.stable-diffusion-model }}
with:
path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
key: ${{ env.cache-name }}
- name: Download ${{ matrix.stable-diffusion-model }}
id: download-stable-diffusion-model
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
run: |
mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
- name: run configure_invokeai.py
id: run-preload-models
run: |
python scripts/configure_invokeai.py --skip-sd-weights --yes
- name: cat invokeai.init
id: cat-invokeai
run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init
- name: Run the tests
id: run-tests
if: matrix.os != 'windows-2022'
run: |
time python scripts/invoke.py \
--no-patchmatch \
--no-nsfw_checker \
--model ${{ matrix.stable-diffusion-model }} \
--from_file ${{ env.TEST_PROMPTS }} \
--root="${{ env.INVOKEAI_ROOT }}" \
--outdir="${{ env.INVOKEAI_ROOT }}/outputs"
- name: export conda env
id: export-conda-env
if: matrix.os != 'windows-2022'
run: |
mkdir -p outputs/img-samples
conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml
- name: Archive results
if: matrix.os != 'windows-2022'
id: archive-results
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs

@@ -4,141 +4,132 @@ on:
branches:
- 'main'
pull_request:
branches:
- 'main'
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
- 'converted_to_draft'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
fail_if_pull_request_is_draft:
if: github.event.pull_request.draft == true
runs-on: ubuntu-18.04
steps:
- name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
run: exit 1
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
stable-diffusion-model:
- stable-diffusion-1.5
requirements-file:
- requirements-lin-cuda.txt
- requirements-lin-amd.txt
- requirements-mac-mps-cpu.txt
- requirements-win-colab-cuda.txt
python-version:
# - '3.9'
- '3.10'
pytorch:
# - linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
# - windows-cuda-11_6
# - windows-cuda-11_7
include:
- requirements-file: requirements-lin-cuda.txt
# - pytorch: linux-cuda-11_6
# os: ubuntu-22.04
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
curl-command: curl
github-env: $GITHUB_ENV
- requirements-file: requirements-lin-amd.txt
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
curl-command: curl
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- requirements-file: requirements-mac-mps-cpu.txt
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
curl-command: curl
github-env: $GITHUB_ENV
- requirements-file: requirements-win-colab-cuda.txt
- pytorch: windows-cpu
os: windows-2022
curl-command: curl.exe
github-env: $env:GITHUB_ENV
- stable-diffusion-model: stable-diffusion-1.5
stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
# - pytorch: windows-cuda-11_6
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_7
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
PIP_USE_PEP517: '1'
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: set INVOKEAI_ROOT Windows
if: matrix.os == 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}
- name: set INVOKEAI_ROOT others
if: matrix.os != 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}
- name: create models.yaml from example
run: |
mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to development branch validation
if: ${{ github.ref == 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to Pull Request validation
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: create requirements.txt
run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
# cache: 'pip'
# cache-dependency-path: ${{ matrix.requirements-file }}
cache: pip
cache-dependency-path: pyproject.toml
- name: install dependencies
run: pip3 install --upgrade pip setuptools wheel
- name: install requirements
run: pip3 install -r '${{ matrix.requirements-file }}'
- name: Use Cached Stable Diffusion Model
id: cache-sd-model
uses: actions/cache@v3
- name: install invokeai
env:
cache-name: cache-${{ matrix.stable-diffusion-model }}
with:
path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
key: ${{ env.cache-name }}
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
run: >
pip3 install
--editable=".[test]"
- name: Download ${{ matrix.stable-diffusion-model }}
id: download-stable-diffusion-model
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
run: |
mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
- name: run pytest
id: run-pytest
run: pytest
- name: run configure_invokeai.py
- name: set INVOKEAI_OUTDIR
run: >
python -c
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
>> ${{ matrix.github-env }}
- name: run invokeai-configure
id: run-preload-models
run: python3 scripts/configure_invokeai.py --skip-sd-weights --yes
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
run: >
invokeai-configure
--yes
--default_only
--full-precision
# can't use fp16 weights without a GPU
- name: Run the tests
id: run-tests
if: matrix.os != 'windows-2022'
run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"
- name: run invokeai
id: run-invokeai
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
run: >
invokeai
--no-patchmatch
--no-nsfw_checker
--from_file ${{ env.TEST_PROMPTS }}
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
- name: Archive results
id: archive-results
if: matrix.os != 'windows-2022'
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs
name: results
path: ${{ env.INVOKEAI_OUTDIR }}

.gitignore

@@ -1,4 +1,5 @@
# ignore default image save location and model symbolic link
embeddings/
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
**/restoration/codeformer/weights
@@ -71,6 +72,7 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
junit/
# Translations
*.mo
@@ -194,7 +196,7 @@ checkpoints
.DS_Store
# Let the frontend manage its own gitignore
!frontend/*
!invokeai/frontend/*
# Scratch folder
.scratch/
@@ -229,8 +231,5 @@ installer/install.sh
installer/update.bat
installer/update.sh
# this may be present if the user created a venv
invokeai
# no longer stored in source directory
models

README.md

@@ -1,6 +1,6 @@
<div align="center">
![project logo](docs/assets/invoke_ai_banner.png)
![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
# InvokeAI: A Stable Diffusion Toolkit
@@ -8,12 +8,10 @@
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
[![CI checks on main badge]][CI checks on main link] [![CI checks on dev badge]][CI checks on dev link] [![latest commit to dev badge]][latest commit to dev link]
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
@@ -26,19 +24,14 @@
[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development
[latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, macOS and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
@@ -46,6 +39,12 @@ _Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._
<div align="center">
![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)
</div>
# Getting Started with InvokeAI
For full installation and upgrade instructions, please see:
@@ -58,10 +57,7 @@ For full installation and upgrade instructions, please see:
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`:
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
8. Type `banana sushi` in the box on the top left and click `Invoke`
## Table of Contents
@@ -76,7 +72,7 @@ For full installation and upgrade instructions, please see:
8. [Support](#support)
9. [Further Reading](#further-reading)
### Installation
## Installation
This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
@@ -89,12 +85,14 @@ instructions, please see:
InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System
You wil need one of the following:
You will need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4GB or more VRAM memory. (Linux only)
We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
@@ -108,52 +106,48 @@ to render 512x512 images.
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
**Note**
## Features
If you have a Nvidia 10xx series card (e.g. the 1080ti), please
run the dream script in full-precision mode as shown below.
Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)
Similarly, specify full-precision mode on Apple M1 hardware.
### *Web Server & UI*
Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command
InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.
```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```
Or by updating your InvokeAI configuration file with this argument.
### *Unified Canvas*
### Features
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
#### Major Features
### *Advanced Prompt Syntax*
- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/)
- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/)
- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/)
- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/)
- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file)
- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts)
- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/)
- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api)
InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space.
#### Other Features
### *Command Line Interface*
- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab)
- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling)
- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds)
- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models)
For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool.
### Other features
- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *Noise Control & Tresholding*
- *Popular Sampler Support*
- *Upscaling & Face Restoration Tools*
- *Embedding Manager & Support*
- *Model Manager & Support*
### Coming Soon
- *Node-Based Architecture & UI*
- And more...
### Latest Changes
For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
For our latest changes, view our [Release
Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
[CHANGELOG](docs/CHANGELOG.md).
### Troubleshooting
## Troubleshooting
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.
@@ -167,7 +161,7 @@ To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the
If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
We hope you enjoy using our software as much as we enjoy creating it,
and we hope that some of those of you who are reading this will elect
@@ -183,13 +177,7 @@ their time, hard work and effort.
### Support
For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
email if you use and like the script.
For support, please use this repository's GitHub Issues tracking service, or join the Discord.
Original portions of the software are Copyright (c) 2022
[Lincoln D. Stein](https://github.com/lstein)
Original portions of the software are Copyright (c) 2023 by respective contributors.
### Further Reading
Please see the original README for more information on this software and underlying algorithm,
located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/).

@@ -2,9 +2,10 @@
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.14
accelerate~=0.15
albumentations
diffusers
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio

@@ -1,80 +0,0 @@
stable-diffusion-1.5:
description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
repo_id: runwayml/stable-diffusion-v1-5
config: v1-inference.yaml
file: v1-5-pruned-emaonly.ckpt
recommended: true
width: 512
height: 512
inpainting-1.5:
description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
repo_id: runwayml/stable-diffusion-inpainting
config: v1-inpainting-inference.yaml
file: sd-v1-5-inpainting.ckpt
recommended: True
width: 512
height: 512
ft-mse-improved-autoencoder-840000:
description: StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
repo_id: stabilityai/sd-vae-ft-mse-original
config: VAE/default
file: vae-ft-mse-840000-ema-pruned.ckpt
recommended: True
width: 512
height: 512
stable-diffusion-1.4:
description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
repo_id: CompVis/stable-diffusion-v-1-4-original
config: v1-inference.yaml
file: sd-v1-4.ckpt
recommended: False
width: 512
height: 512
waifu-diffusion-1.3:
description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
repo_id: hakurei/waifu-diffusion-v1-3
config: v1-inference.yaml
file: model-epoch09-float32.ckpt
recommended: False
width: 512
height: 512
trinart-2.0:
description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB)
repo_id: naclbit/trinart_stable_diffusion_v2
config: v1-inference.yaml
file: trinart2_step95000.ckpt
recommended: False
width: 512
height: 512
trinart_characters-1.0:
description: An SD model finetuned with 19.2M anime/manga style images (2.13 GB)
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
config: v1-inference.yaml
file: trinart_characters_it4_v1.ckpt
recommended: False
width: 512
height: 512
trinart_vae:
description: Custom autoencoder for trinart_characters
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
config: VAE/trinart
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
recommended: False
width: 512
height: 512
papercut-1.0:
description: SD 1.5 fine-tuned for papercut art (use "PaperCut" in your prompts) (2.13 GB)
repo_id: Fictiverse/Stable_Diffusion_PaperCut_Model
config: v1-inference.yaml
file: PaperCut_v1.ckpt
recommended: False
width: 512
height: 512
voxel_art-1.0:
description: Stable Diffusion trained on voxel art (use "VoxelArt" in your prompts) (4.27 GB)
repo_id: Fictiverse/Stable_Diffusion_VoxelArt_Model
config: v1-inference.yaml
file: VoxelArt_v1.ckpt
recommended: False
width: 512
height: 512

@@ -1,803 +0,0 @@
sd-concepts-library/001glitch-core
sd-concepts-library/2814-roth
sd-concepts-library/3d-female-cyborgs
sd-concepts-library/4tnght
sd-concepts-library/80s-anime-ai
sd-concepts-library/80s-anime-ai-being
sd-concepts-library/852style-girl
sd-concepts-library/8bit
sd-concepts-library/8sconception
sd-concepts-library/Aflac-duck
sd-concepts-library/Akitsuki
sd-concepts-library/Atako
sd-concepts-library/Exodus-Styling
sd-concepts-library/RINGAO
sd-concepts-library/a-female-hero-from-the-legend-of-mir
sd-concepts-library/a-hat-kid
sd-concepts-library/a-tale-of-two-empires
sd-concepts-library/aadhav-face
sd-concepts-library/aavegotchi
sd-concepts-library/abby-face
sd-concepts-library/abstract-concepts
sd-concepts-library/accurate-angel
sd-concepts-library/agm-style-nao
sd-concepts-library/aj-fosik
sd-concepts-library/alberto-mielgo
sd-concepts-library/alex-portugal
sd-concepts-library/alex-thumbnail-object-2000-steps
sd-concepts-library/aleyna-tilki
sd-concepts-library/alf
sd-concepts-library/alicebeta
sd-concepts-library/alien-avatar
sd-concepts-library/alisa
sd-concepts-library/all-rings-albuns
sd-concepts-library/altvent
sd-concepts-library/altyn-helmet
sd-concepts-library/amine
sd-concepts-library/amogus
sd-concepts-library/anders-zorn
sd-concepts-library/angus-mcbride-style
sd-concepts-library/animalve3-1500seq
sd-concepts-library/anime-background-style
sd-concepts-library/anime-background-style-v2
sd-concepts-library/anime-boy
sd-concepts-library/anime-girl
sd-concepts-library/anyXtronXredshift
sd-concepts-library/anya-forger
sd-concepts-library/apex-wingman
sd-concepts-library/apulian-rooster-v0-1
sd-concepts-library/arcane-face
sd-concepts-library/arcane-style-jv
sd-concepts-library/arcimboldo-style
sd-concepts-library/armando-reveron-style
sd-concepts-library/armor-concept
sd-concepts-library/arq-render
sd-concepts-library/art-brut
sd-concepts-library/arthur1
sd-concepts-library/artist-yukiko-kanagai
sd-concepts-library/arwijn
sd-concepts-library/ashiok
sd-concepts-library/at-wolf-boy-object
sd-concepts-library/atm-ant
sd-concepts-library/atm-ant-2
sd-concepts-library/axe-tattoo
sd-concepts-library/ayush-spider-spr
sd-concepts-library/azura-from-vibrant-venture
sd-concepts-library/ba-shiroko
sd-concepts-library/babau
sd-concepts-library/babs-bunny
sd-concepts-library/babushork
sd-concepts-library/backrooms
sd-concepts-library/bad_Hub_Hugh
sd-concepts-library/bada-club
sd-concepts-library/baldi
sd-concepts-library/baluchitherian
sd-concepts-library/bamse
sd-concepts-library/bamse-og-kylling
sd-concepts-library/bee
sd-concepts-library/beholder
sd-concepts-library/beldam
sd-concepts-library/belen
sd-concepts-library/bella-goth
sd-concepts-library/belle-delphine
sd-concepts-library/bert-muppet
sd-concepts-library/better-collage3
sd-concepts-library/between2-mt-fade
sd-concepts-library/birb-style
sd-concepts-library/black-and-white-design
sd-concepts-library/black-waifu
sd-concepts-library/bloo
sd-concepts-library/blue-haired-boy
sd-concepts-library/blue-zombie
sd-concepts-library/blue-zombiee
sd-concepts-library/bluebey
sd-concepts-library/bluebey-2
sd-concepts-library/bobs-burgers
sd-concepts-library/boissonnard
sd-concepts-library/bonzi-monkey
sd-concepts-library/borderlands
sd-concepts-library/bored-ape-textual-inversion
sd-concepts-library/boris-anderson
sd-concepts-library/bozo-22
sd-concepts-library/breakcore
sd-concepts-library/brittney-williams-art
sd-concepts-library/bruma
sd-concepts-library/brunnya
sd-concepts-library/buddha-statue
sd-concepts-library/bullvbear
sd-concepts-library/button-eyes
sd-concepts-library/canadian-goose
sd-concepts-library/canary-cap
sd-concepts-library/cancer_style
sd-concepts-library/captain-haddock
sd-concepts-library/captainkirb
sd-concepts-library/car-toy-rk
sd-concepts-library/carasibana
sd-concepts-library/carlitos-el-mago
sd-concepts-library/carrascharacter
sd-concepts-library/cartoona-animals
sd-concepts-library/cat-toy
sd-concepts-library/centaur
sd-concepts-library/cgdonny1
sd-concepts-library/cham
sd-concepts-library/chandra-nalaar
sd-concepts-library/char-con
sd-concepts-library/character-pingu
sd-concepts-library/cheburashka
sd-concepts-library/chen-1
sd-concepts-library/child-zombie
sd-concepts-library/chillpill
sd-concepts-library/chonkfrog
sd-concepts-library/chop
sd-concepts-library/christo-person
sd-concepts-library/chuck-walton
sd-concepts-library/chucky
sd-concepts-library/chungus-poodl-pet
sd-concepts-library/cindlop
sd-concepts-library/collage-cutouts
sd-concepts-library/collage14
sd-concepts-library/collage3
sd-concepts-library/collage3-hubcity
sd-concepts-library/cologne
sd-concepts-library/color-page
sd-concepts-library/colossus
sd-concepts-library/command-and-conquer-remastered-cameos
sd-concepts-library/concept-art
sd-concepts-library/conner-fawcett-style
sd-concepts-library/conway-pirate
sd-concepts-library/coop-himmelblau
sd-concepts-library/coraline
sd-concepts-library/cornell-box
sd-concepts-library/cortana
sd-concepts-library/covid-19-rapid-test
sd-concepts-library/cow-uwu
sd-concepts-library/cowboy
sd-concepts-library/crazy-1
sd-concepts-library/crazy-2
sd-concepts-library/crb-portraits
sd-concepts-library/crb-surrealz
sd-concepts-library/crbart
sd-concepts-library/crested-gecko
sd-concepts-library/crinos-form-garou
sd-concepts-library/cry-baby-style
sd-concepts-library/crybaby-style-2-0
sd-concepts-library/csgo-awp-object
sd-concepts-library/csgo-awp-texture-map
sd-concepts-library/cubex
sd-concepts-library/cumbia-peruana
sd-concepts-library/cute-bear
sd-concepts-library/cute-cat
sd-concepts-library/cute-game-style
sd-concepts-library/cyberpunk-lucy
sd-concepts-library/dabotap
sd-concepts-library/dan-mumford
sd-concepts-library/dan-seagrave-art-style
sd-concepts-library/dark-penguin-pinguinanimations
sd-concepts-library/darkpenguinanimatronic
sd-concepts-library/darkplane
sd-concepts-library/david-firth-artstyle
sd-concepts-library/david-martinez-cyberpunk
sd-concepts-library/david-martinez-edgerunners
sd-concepts-library/david-moreno-architecture
sd-concepts-library/daycare-attendant-sun-fnaf
sd-concepts-library/ddattender
sd-concepts-library/degods
sd-concepts-library/degodsheavy
sd-concepts-library/depthmap
sd-concepts-library/depthmap-style
sd-concepts-library/design
sd-concepts-library/detectivedinosaur1
sd-concepts-library/diaosu-toy
sd-concepts-library/dicoo
sd-concepts-library/dicoo2
sd-concepts-library/dishonored-portrait-styles
sd-concepts-library/disquieting-muses
sd-concepts-library/ditko
sd-concepts-library/dlooak
sd-concepts-library/doc
sd-concepts-library/doener-red-line-art
sd-concepts-library/dog
sd-concepts-library/dog-django
sd-concepts-library/doge-pound
sd-concepts-library/dong-ho
sd-concepts-library/dong-ho2
sd-concepts-library/doose-s-realistic-art-style
sd-concepts-library/dq10-anrushia
sd-concepts-library/dr-livesey
sd-concepts-library/dr-strange
sd-concepts-library/dragonborn
sd-concepts-library/dreamcore
sd-concepts-library/dreamy-painting
sd-concepts-library/drive-scorpion-jacket
sd-concepts-library/dsmuses
sd-concepts-library/dtv-pkmn
sd-concepts-library/dullboy-caricature
sd-concepts-library/duranduran
sd-concepts-library/durer-style
sd-concepts-library/dyoudim-style
sd-concepts-library/early-mishima-kurone
sd-concepts-library/eastward
sd-concepts-library/eddie
sd-concepts-library/edgerunners-style
sd-concepts-library/edgerunners-style-v2
sd-concepts-library/el-salvador-style-style
sd-concepts-library/elegant-flower
sd-concepts-library/elspeth-tirel
sd-concepts-library/eru-chitanda-casual
sd-concepts-library/erwin-olaf-style
sd-concepts-library/ettblackteapot
sd-concepts-library/explosions-cat
sd-concepts-library/eye-of-agamotto
sd-concepts-library/f-22
sd-concepts-library/facadeplace
sd-concepts-library/fairy-tale-painting-style
sd-concepts-library/fairytale
sd-concepts-library/fang-yuan-001
sd-concepts-library/faraon-love-shady
sd-concepts-library/fasina
sd-concepts-library/felps
sd-concepts-library/female-kpop-singer
sd-concepts-library/fergal-cat
sd-concepts-library/filename-2
sd-concepts-library/fileteado-porteno
sd-concepts-library/final-fantasy-logo
sd-concepts-library/fireworks-over-water
sd-concepts-library/fish
sd-concepts-library/flag-ussr
sd-concepts-library/flatic
sd-concepts-library/floral
sd-concepts-library/fluid-acrylic-jellyfish-creatures-style-of-carl-ingram-art
sd-concepts-library/fnf-boyfriend
sd-concepts-library/fold-structure
sd-concepts-library/fox-purple
sd-concepts-library/fractal
sd-concepts-library/fractal-flame
sd-concepts-library/fractal-temple-style
sd-concepts-library/frank-frazetta
sd-concepts-library/franz-unterberger
sd-concepts-library/freddy-fazbear
sd-concepts-library/freefonix-style
sd-concepts-library/furrpopasthetic
sd-concepts-library/fursona
sd-concepts-library/fzk
sd-concepts-library/galaxy-explorer
sd-concepts-library/ganyu-genshin-impact
sd-concepts-library/garcon-the-cat
sd-concepts-library/garfield-pizza-plush
sd-concepts-library/garfield-pizza-plush-v2
sd-concepts-library/gba-fe-class-cards
sd-concepts-library/gba-pokemon-sprites
sd-concepts-library/geggin
sd-concepts-library/ggplot2
sd-concepts-library/ghost-style
sd-concepts-library/ghostproject-men
sd-concepts-library/gibasachan-v0
sd-concepts-library/gim
sd-concepts-library/gio
sd-concepts-library/giygas
sd-concepts-library/glass-pipe
sd-concepts-library/glass-prism-cube
sd-concepts-library/glow-forest
sd-concepts-library/goku
sd-concepts-library/gram-tops
sd-concepts-library/green-blue-shanshui
sd-concepts-library/green-tent
sd-concepts-library/grifter
sd-concepts-library/grisstyle
sd-concepts-library/grit-toy
sd-concepts-library/gt-color-paint-2
sd-concepts-library/gta5-artwork
sd-concepts-library/guttestreker
sd-concepts-library/gymnastics-leotard-v2
sd-concepts-library/half-life-2-dog
sd-concepts-library/handstand
sd-concepts-library/hanfu-anime-style
sd-concepts-library/happy-chaos
sd-concepts-library/happy-person12345
sd-concepts-library/happy-person12345-assets
sd-concepts-library/harley-quinn
sd-concepts-library/harmless-ai-1
sd-concepts-library/harmless-ai-house-style-1
sd-concepts-library/hd-emoji
sd-concepts-library/heather
sd-concepts-library/henjo-techno-show
sd-concepts-library/herge-style
sd-concepts-library/hiten-style-nao
sd-concepts-library/hitokomoru-style-nao
sd-concepts-library/hiyuki-chan
sd-concepts-library/hk-bamboo
sd-concepts-library/hk-betweenislands
sd-concepts-library/hk-bicycle
sd-concepts-library/hk-blackandwhite
sd-concepts-library/hk-breakfast
sd-concepts-library/hk-buses
sd-concepts-library/hk-clouds
sd-concepts-library/hk-goldbuddha
sd-concepts-library/hk-goldenlantern
sd-concepts-library/hk-hkisland
sd-concepts-library/hk-leaves
sd-concepts-library/hk-market
sd-concepts-library/hk-oldcamera
sd-concepts-library/hk-opencamera
sd-concepts-library/hk-peach
sd-concepts-library/hk-phonevax
sd-concepts-library/hk-streetpeople
sd-concepts-library/hk-vintage
sd-concepts-library/hoi4
sd-concepts-library/hoi4-leaders
sd-concepts-library/homestuck-sprite
sd-concepts-library/homestuck-troll
sd-concepts-library/hours-sentry-fade
sd-concepts-library/hours-style
sd-concepts-library/hrgiger-drmacabre
sd-concepts-library/huang-guang-jian
sd-concepts-library/huatli
sd-concepts-library/huayecai820-greyscale
sd-concepts-library/hub-city
sd-concepts-library/hubris-oshri
sd-concepts-library/huckleberry
sd-concepts-library/hydrasuit
sd-concepts-library/i-love-chaos
sd-concepts-library/ibere-thenorio
sd-concepts-library/ic0n
sd-concepts-library/ie-gravestone
sd-concepts-library/ikea-fabler
sd-concepts-library/illustration-style
sd-concepts-library/ilo-kunst
sd-concepts-library/ilya-shkipin
sd-concepts-library/im-poppy
sd-concepts-library/ina-art
sd-concepts-library/indian-watercolor-portraits
sd-concepts-library/indiana
sd-concepts-library/ingmar-bergman
sd-concepts-library/insidewhale
sd-concepts-library/interchanges
sd-concepts-library/inuyama-muneto-style-nao
sd-concepts-library/irasutoya
sd-concepts-library/iridescent-illustration-style
sd-concepts-library/iridescent-photo-style
sd-concepts-library/isabell-schulte-pv-pvii-3000steps
sd-concepts-library/isabell-schulte-pviii-1-image-style
sd-concepts-library/isabell-schulte-pviii-1024px-1500-steps-style
sd-concepts-library/isabell-schulte-pviii-12tiles-3000steps-style
sd-concepts-library/isabell-schulte-pviii-4-tiles-1-lr-3000-steps-style
sd-concepts-library/isabell-schulte-pviii-4-tiles-3-lr-5000-steps-style
sd-concepts-library/isabell-schulte-pviii-4tiles-500steps
sd-concepts-library/isabell-schulte-pviii-4tiles-6000steps
sd-concepts-library/isabell-schulte-pviii-style
sd-concepts-library/isometric-tile-test
sd-concepts-library/jacqueline-the-unicorn
sd-concepts-library/james-web-space-telescope
sd-concepts-library/jamie-hewlett-style
sd-concepts-library/jamiels
sd-concepts-library/jang-sung-rak-style
sd-concepts-library/jetsetdreamcastcovers
sd-concepts-library/jin-kisaragi
sd-concepts-library/jinjoon-lee-they
sd-concepts-library/jm-bergling-monogram
sd-concepts-library/joe-mad
sd-concepts-library/joe-whiteford-art-style
sd-concepts-library/joemad
sd-concepts-library/john-blanche
sd-concepts-library/johnny-silverhand
sd-concepts-library/jojo-bizzare-adventure-manga-lineart
sd-concepts-library/jos-de-kat
sd-concepts-library/junji-ito-artstyle
sd-concepts-library/kaleido
sd-concepts-library/kaneoya-sachiko
sd-concepts-library/kanovt
sd-concepts-library/kanv1
sd-concepts-library/karan-gloomy
sd-concepts-library/karl-s-lzx-1
sd-concepts-library/kasumin
sd-concepts-library/kawaii-colors
sd-concepts-library/kawaii-girl-plus-object
sd-concepts-library/kawaii-girl-plus-style
sd-concepts-library/kawaii-girl-plus-style-v1-1
sd-concepts-library/kay
sd-concepts-library/kaya-ghost-assasin
sd-concepts-library/ki
sd-concepts-library/kinda-sus
sd-concepts-library/kings-quest-agd
sd-concepts-library/kiora
sd-concepts-library/kira-sensei
sd-concepts-library/kirby
sd-concepts-library/klance
sd-concepts-library/kodakvision500t
sd-concepts-library/kogatan-shiny
sd-concepts-library/kogecha
sd-concepts-library/kojima-ayami
sd-concepts-library/koko-dog
sd-concepts-library/kuvshinov
sd-concepts-library/kysa-v-style
sd-concepts-library/laala-character
sd-concepts-library/larrette
sd-concepts-library/lavko
sd-concepts-library/lazytown-stephanie
sd-concepts-library/ldr
sd-concepts-library/ldrs
sd-concepts-library/led-toy
sd-concepts-library/lego-astronaut
sd-concepts-library/leica
sd-concepts-library/leif-jones
sd-concepts-library/lex
sd-concepts-library/liliana
sd-concepts-library/liliana-vess
sd-concepts-library/liminal-spaces-2-0
sd-concepts-library/liminalspaces
sd-concepts-library/line-art
sd-concepts-library/line-style
sd-concepts-library/linnopoke
sd-concepts-library/liquid-light
sd-concepts-library/liqwid-aquafarmer
sd-concepts-library/lizardman
sd-concepts-library/loab-character
sd-concepts-library/loab-style
sd-concepts-library/lofa
sd-concepts-library/logo-with-face-on-shield
sd-concepts-library/lolo
sd-concepts-library/looney-anime
sd-concepts-library/lost-rapper
sd-concepts-library/lphr-style
sd-concepts-library/lucario
sd-concepts-library/lucky-luke
sd-concepts-library/lugal-ki-en
sd-concepts-library/luinv2
sd-concepts-library/lula-13
sd-concepts-library/lumio
sd-concepts-library/lxj-o4
sd-concepts-library/m-geo
sd-concepts-library/m-geoo
sd-concepts-library/madhubani-art
sd-concepts-library/mafalda-character
sd-concepts-library/magic-pengel
sd-concepts-library/malika-favre-art-style
sd-concepts-library/manga-style
sd-concepts-library/marbling-art
sd-concepts-library/margo
sd-concepts-library/marty
sd-concepts-library/marty6
sd-concepts-library/mass
sd-concepts-library/masyanya
sd-concepts-library/masyunya
sd-concepts-library/mate
sd-concepts-library/matthew-stone
sd-concepts-library/mattvidpro
sd-concepts-library/maurice-quentin-de-la-tour-style
sd-concepts-library/maus
sd-concepts-library/max-foley
sd-concepts-library/mayor-richard-irvin
sd-concepts-library/mechasoulall
sd-concepts-library/medazzaland
sd-concepts-library/memnarch-mtg
sd-concepts-library/metagabe
sd-concepts-library/meyoco
sd-concepts-library/meze-audio-elite-headphones
sd-concepts-library/midjourney-style
sd-concepts-library/mikako-method
sd-concepts-library/mikako-methodi2i
sd-concepts-library/miko-3-robot
sd-concepts-library/milady
sd-concepts-library/mildemelwe-style
sd-concepts-library/million-live-akane-15k
sd-concepts-library/million-live-akane-3k
sd-concepts-library/million-live-akane-shifuku-3k
sd-concepts-library/million-live-spade-q-object-3k
sd-concepts-library/million-live-spade-q-style-3k
sd-concepts-library/minecraft-concept-art
sd-concepts-library/mishima-kurone
sd-concepts-library/mizkif
sd-concepts-library/moeb-style
sd-concepts-library/moebius
sd-concepts-library/mokoko
sd-concepts-library/mokoko-seed
sd-concepts-library/monster-girl
sd-concepts-library/monster-toy
sd-concepts-library/monte-novo
sd-concepts-library/moo-moo
sd-concepts-library/morino-hon-style
sd-concepts-library/moxxi
sd-concepts-library/msg
sd-concepts-library/mtg-card
sd-concepts-library/mtl-longsky
sd-concepts-library/mu-sadr
sd-concepts-library/munch-leaks-style
sd-concepts-library/museum-by-coop-himmelblau
sd-concepts-library/muxoyara
sd-concepts-library/my-hero-academia-style
sd-concepts-library/my-mug
sd-concepts-library/mycat
sd-concepts-library/mystical-nature
sd-concepts-library/naf
sd-concepts-library/nahiri
sd-concepts-library/namine-ritsu
sd-concepts-library/naoki-saito
sd-concepts-library/nard-style
sd-concepts-library/naruto
sd-concepts-library/natasha-johnston
sd-concepts-library/nathan-wyatt
sd-concepts-library/naval-portrait
sd-concepts-library/nazuna
sd-concepts-library/nebula
sd-concepts-library/ned-flanders
sd-concepts-library/neon-pastel
sd-concepts-library/new-priests
sd-concepts-library/nic-papercuts
sd-concepts-library/nikodim
sd-concepts-library/nissa-revane
sd-concepts-library/nixeu
sd-concepts-library/noggles
sd-concepts-library/nomad
sd-concepts-library/nouns-glasses
sd-concepts-library/obama-based-on-xi
sd-concepts-library/obama-self-2
sd-concepts-library/og-mox-style
sd-concepts-library/ohisashiburi-style
sd-concepts-library/oleg-kuvaev
sd-concepts-library/olli-olli
sd-concepts-library/on-kawara
sd-concepts-library/one-line-drawing
sd-concepts-library/onepunchman
sd-concepts-library/onzpo
sd-concepts-library/orangejacket
sd-concepts-library/ori
sd-concepts-library/ori-toor
sd-concepts-library/orientalist-art
sd-concepts-library/osaka-jyo
sd-concepts-library/osaka-jyo2
sd-concepts-library/osrsmini2
sd-concepts-library/osrstiny
sd-concepts-library/other-mother
sd-concepts-library/ouroboros
sd-concepts-library/outfit-items
sd-concepts-library/overprettified
sd-concepts-library/owl-house
sd-concepts-library/painted-by-silver-of-999
sd-concepts-library/painted-by-silver-of-999-2
sd-concepts-library/painted-student
sd-concepts-library/painting
sd-concepts-library/pantone-milk
sd-concepts-library/paolo-bonolis
sd-concepts-library/party-girl
sd-concepts-library/pascalsibertin
sd-concepts-library/pastelartstyle
sd-concepts-library/paul-noir
sd-concepts-library/pen-ink-portraits-bennorthen
sd-concepts-library/phan
sd-concepts-library/phan-s-collage
sd-concepts-library/phc
sd-concepts-library/phoenix-01
sd-concepts-library/pineda-david
sd-concepts-library/pink-beast-pastelae-style
sd-concepts-library/pintu
sd-concepts-library/pion-by-august-semionov
sd-concepts-library/piotr-jablonski
sd-concepts-library/pixel-mania
sd-concepts-library/pixel-toy
sd-concepts-library/pjablonski-style
sd-concepts-library/plant-style
sd-concepts-library/plen-ki-mun
sd-concepts-library/pokemon-conquest-sprites
sd-concepts-library/pool-test
sd-concepts-library/poolrooms
sd-concepts-library/poring-ragnarok-online
sd-concepts-library/poutine-dish
sd-concepts-library/princess-knight-art
sd-concepts-library/progress-chip
sd-concepts-library/puerquis-toy
sd-concepts-library/purplefishli
sd-concepts-library/pyramidheadcosplay
sd-concepts-library/qpt-atrium
sd-concepts-library/quiesel
sd-concepts-library/r-crumb-style
sd-concepts-library/rahkshi-bionicle
sd-concepts-library/raichu
sd-concepts-library/rail-scene
sd-concepts-library/rail-scene-style
sd-concepts-library/ralph-mcquarrie
sd-concepts-library/ransom
sd-concepts-library/rayne-weynolds
sd-concepts-library/rcrumb-portraits-style
sd-concepts-library/rd-chaos
sd-concepts-library/rd-paintings
sd-concepts-library/red-glasses
sd-concepts-library/reeducation-camp
sd-concepts-library/reksio-dog
sd-concepts-library/rektguy
sd-concepts-library/remert
sd-concepts-library/renalla
sd-concepts-library/repeat
sd-concepts-library/retro-girl
sd-concepts-library/retro-mecha-rangers
sd-concepts-library/retropixelart-pinguin
sd-concepts-library/rex-deno
sd-concepts-library/rhizomuse-machine-bionic-sculpture
sd-concepts-library/ricar
sd-concepts-library/rickyart
sd-concepts-library/rico-face
sd-concepts-library/riker-doll
sd-concepts-library/rikiart
sd-concepts-library/rikiboy-art
sd-concepts-library/rilakkuma
sd-concepts-library/rishusei-style
sd-concepts-library/rj-palmer
sd-concepts-library/rl-pkmn-test
sd-concepts-library/road-to-ruin
sd-concepts-library/robertnava
sd-concepts-library/roblox-avatar
sd-concepts-library/roy-lichtenstein
sd-concepts-library/ruan-jia
sd-concepts-library/russian
sd-concepts-library/s1m-naoto-ohshima
sd-concepts-library/saheeli-rai
sd-concepts-library/sakimi-style
sd-concepts-library/salmonid
sd-concepts-library/sam-yang
sd-concepts-library/sanguo-guanyu
sd-concepts-library/sas-style
sd-concepts-library/scarlet-witch
sd-concepts-library/schloss-mosigkau
sd-concepts-library/scrap-style
sd-concepts-library/scratch-project
sd-concepts-library/sculptural-style
sd-concepts-library/sd-concepts-library-uma-meme
sd-concepts-library/seamless-ground
sd-concepts-library/selezneva-alisa
sd-concepts-library/sem-mac2n
sd-concepts-library/senneca
sd-concepts-library/seraphimmoonshadow-art
sd-concepts-library/sewerslvt
sd-concepts-library/she-hulk-law-art
sd-concepts-library/she-mask
sd-concepts-library/sherhook-painting
sd-concepts-library/sherhook-painting-v2
sd-concepts-library/shev-linocut
sd-concepts-library/shigure-ui-style
sd-concepts-library/shiny-polyman
sd-concepts-library/shrunken-head
sd-concepts-library/shu-doll
sd-concepts-library/shvoren-style
sd-concepts-library/sims-2-portrait
sd-concepts-library/singsing
sd-concepts-library/singsing-doll
sd-concepts-library/sintez-ico
sd-concepts-library/skyfalls
sd-concepts-library/slm
sd-concepts-library/smarties
sd-concepts-library/smiling-friend-style
sd-concepts-library/smooth-pencils
sd-concepts-library/smurf-style
sd-concepts-library/smw-map
sd-concepts-library/society-finch
sd-concepts-library/sorami-style
sd-concepts-library/spider-gwen
sd-concepts-library/spritual-monsters
sd-concepts-library/stable-diffusion-conceptualizer
sd-concepts-library/star-tours-posters
sd-concepts-library/stardew-valley-pixel-art
sd-concepts-library/starhavenmachinegods
sd-concepts-library/sterling-archer
sd-concepts-library/stretch-re1-robot
sd-concepts-library/stuffed-penguin-toy
sd-concepts-library/style-of-marc-allante
sd-concepts-library/summie-style
sd-concepts-library/sunfish
sd-concepts-library/super-nintendo-cartridge
sd-concepts-library/supitcha-mask
sd-concepts-library/sushi-pixel
sd-concepts-library/swamp-choe-2
sd-concepts-library/t-skrang
sd-concepts-library/takuji-kawano
sd-concepts-library/tamiyo
sd-concepts-library/tangles
sd-concepts-library/tb303
sd-concepts-library/tcirle
sd-concepts-library/teelip-ir-landscape
sd-concepts-library/teferi
sd-concepts-library/tela-lenca
sd-concepts-library/tela-lenca2
sd-concepts-library/terraria-style
sd-concepts-library/tesla-bot
sd-concepts-library/test
sd-concepts-library/test-epson
sd-concepts-library/test2
sd-concepts-library/testing
sd-concepts-library/thalasin
sd-concepts-library/thegeneral
sd-concepts-library/thorneworks
sd-concepts-library/threestooges
sd-concepts-library/thunderdome-cover
sd-concepts-library/thunderdome-covers
sd-concepts-library/ti-junglepunk-v0
sd-concepts-library/tili-concept
sd-concepts-library/titan-robot
sd-concepts-library/tnj
sd-concepts-library/toho-pixel
sd-concepts-library/tomcat
sd-concepts-library/tonal1
sd-concepts-library/tony-diterlizzi-s-planescape-art
sd-concepts-library/towerplace
sd-concepts-library/toy
sd-concepts-library/toy-bonnie-plush
sd-concepts-library/toyota-sera
sd-concepts-library/transmutation-circles
sd-concepts-library/trash-polka-artstyle
sd-concepts-library/travis-bedel
sd-concepts-library/trigger-studio
sd-concepts-library/trust-support
sd-concepts-library/trypophobia
sd-concepts-library/ttte
sd-concepts-library/tubby
sd-concepts-library/tubby-cats
sd-concepts-library/tudisco
sd-concepts-library/turtlepics
sd-concepts-library/type
sd-concepts-library/ugly-sonic
sd-concepts-library/uliana-kudinova
sd-concepts-library/uma
sd-concepts-library/uma-clean-object
sd-concepts-library/uma-meme
sd-concepts-library/uma-meme-style
sd-concepts-library/uma-style-classic
sd-concepts-library/unfinished-building
sd-concepts-library/urivoldemort
sd-concepts-library/uzumaki
sd-concepts-library/valorantstyle
sd-concepts-library/vb-mox
sd-concepts-library/vcr-classique
sd-concepts-library/venice
sd-concepts-library/vespertine
sd-concepts-library/victor-narm
sd-concepts-library/vietstoneking
sd-concepts-library/vivien-reid
sd-concepts-library/vkuoo1
sd-concepts-library/vraska
sd-concepts-library/w3u
sd-concepts-library/walter-wick-photography
sd-concepts-library/warhammer-40k-drawing-style
sd-concepts-library/waterfallshadow
sd-concepts-library/wayne-reynolds-character
sd-concepts-library/wedding
sd-concepts-library/wedding-HandPainted
sd-concepts-library/werebloops
sd-concepts-library/wheatland
sd-concepts-library/wheatland-arknight
sd-concepts-library/wheelchair
sd-concepts-library/wildkat
sd-concepts-library/willy-hd
sd-concepts-library/wire-angels
sd-concepts-library/wish-artist-stile
sd-concepts-library/wlop-style
sd-concepts-library/wojak
sd-concepts-library/wojaks-now
sd-concepts-library/wojaks-now-now-now
sd-concepts-library/xatu
sd-concepts-library/xatu2
sd-concepts-library/xbh
sd-concepts-library/xi
sd-concepts-library/xidiversity
sd-concepts-library/xioboma
sd-concepts-library/xuna
sd-concepts-library/xyz
sd-concepts-library/yb-anime
sd-concepts-library/yerba-mate
sd-concepts-library/yesdelete
sd-concepts-library/yf21
sd-concepts-library/yilanov2
sd-concepts-library/yinit
sd-concepts-library/yoji-shinkawa-style
sd-concepts-library/yolandi-visser
sd-concepts-library/yoshi
sd-concepts-library/youpi2
sd-concepts-library/youtooz-candy
sd-concepts-library/yuji-himukai-style
sd-concepts-library/zaney
sd-concepts-library/zaneypixelz
sd-concepts-library/zdenek-art
sd-concepts-library/zero
sd-concepts-library/zero-bottle
sd-concepts-library/zero-suit-samus
sd-concepts-library/zillertal-can
sd-concepts-library/zizigooloo
sd-concepts-library/zk
sd-concepts-library/zoroark

View File

@@ -1,65 +0,0 @@
FROM python:3.10-slim AS builder
# use bash
SHELL [ "/bin/bash", "-c" ]
# Install necessary packages
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
gcc=4:10.2.* \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
python3-dev=3.9.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# set WORKDIR, PATH and copy sources
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/.venv/bin:$PATH
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
# install requirements
RUN python3 -m venv .venv \
&& pip install \
--upgrade \
--no-cache-dir \
'wheel>=0.38.4' \
&& pip install \
--no-cache-dir \
-r ${PIP_REQUIREMENTS}
FROM python:3.10-slim AS runtime
# setup environment
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
COPY --from=builder ${APPDIR} .
ENV \
PATH=${APPDIR}/.venv/bin:$PATH \
INVOKEAI_ROOT=/data \
INVOKE_MODEL_RECONFIGURE=--yes
# Install necessary packages
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.* \
&& ln -sf \
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
&& python3 -c "from patchmatch import patch_match" \
&& apt-get remove -y \
--autoremove \
build-essential \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/*
# set Entrypoint and default CMD
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
CMD [ "--web", "--host=0.0.0.0" ]

View File

@@ -1,86 +0,0 @@
#######################
#### Builder stage ####
FROM library/ubuntu:22.04 AS builder
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev
# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
ln -sf opencv4.pc opencv.pc
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
torch==1.12.0+cu116 \
torchvision==0.13.0+cu116 &&\
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
pip install -r requirements.txt &&\
pip install -e .
#######################
#### Runtime stage ####
FROM library/ubuntu:22.04 as runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt install -y --no-install-recommends \
git \
curl \
ncdu \
iotop \
bzip2 \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev &&\
apt-get clean && apt-get autoclean
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
# build patchmatch
RUN python -c "from patchmatch import patch_match"
## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai
ENTRYPOINT ["bash"]
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]

View File

@@ -1,44 +0,0 @@
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai
IMAGE=local/invokeai:latest
USER=$(shell id -u)
GROUP=$(shell id -g)
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.
build:
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
configure:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/configure_invokeai.py"
# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
-p 9090:9090 \
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
# Run the cli with the runtime dir mounted
cli:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/invoke.py"
# Run the container with the runtime dir mounted and open a bash shell
shell:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
.PHONY: build configure web cli shell
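A typical session with these targets might look like the following sketch (assuming an NVIDIA container runtime on the host):

```bash
make build      # builds local/invokeai:latest from Dockerfile.cloud
make configure  # one-time model download into ${HOME}/invokeai
make web        # serves the web UI on port 9090
```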

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
source ./docker-build/env.sh \
|| echo "please execute docker-build/build.sh from repository root" \
|| exit 1
PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${DOCKERFILE}"
echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "arch:\t\t ${ARCH}"
echo -e "Platform:\t ${PLATFORM}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "createing docker volume "
docker volume create "${VOLUMENAME}"
fi
# Build Container
docker build \
--platform="${PLATFORM}" \
--tag="${INVOKEAI_TAG}" \
--build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
--file="${DOCKERFILE}" \
.

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Variables shared by build.sh and run.sh
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
ARCH=${ARCH:-$(uname -m)}
PLATFORM=${PLATFORM:-Linux/${ARCH}}
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
INVOKEAI_BRANCH=$(git branch --show)
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
source ./docker-build/env.sh \
|| echo "please run from repository root" \
|| exit 1
# check if HUGGINGFACE_TOKEN is available
# You must have accepted the terms of use for required models
HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
echo -e "You are using these values:\n"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="$PLATFORM" \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount="source=$VOLUMENAME,target=/data" \
--env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
"$INVOKEAI_TAG" ${1:+$@}

86
docker/Dockerfile Normal file
View File

@@ -0,0 +1,86 @@
# syntax=docker/dockerfile:1
ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
# Install necessary packages
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.* \
&& rm -rf /var/lib/apt/lists/*
# set working directory and path
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1
# prepare for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}
# Install dependencies
RUN \
--mount=type=cache,target=${PIP_CACHE_DIR} \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.* \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
python3 -m venv "${APPNAME}" \
--upgrade-deps
# copy sources
COPY --link . .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
# build patchmatch
RUN python3 -c "from patchmatch import patch_match"
#####################
## runtime image ##
#####################
FROM python-base AS runtime
# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# set Entrypoint and default CMD
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
LABEL org.opencontainers.image.authors="mauwii@outlook.de"

44
docker/build.sh Executable file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "createing docker volume "
docker volume create "${VOLUMENAME}"
fi
# Build Container
DOCKER_BUILDKIT=1 docker build \
--platform="${PLATFORM}" \
--tag="${CONTAINER_IMAGE}" \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
..

38
docker/env.sh Normal file
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"

31
docker/run.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="${PLATFORM}" \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="${VOLUMENAME}",target=/data \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${1:+$@}

View File

@@ -4,6 +4,108 @@ title: Changelog
# :octicons-log-16: **Changelog**
## v2.3.0 <small>(15 January 2023)</small>
**Transition to diffusers**
Version 2.3 provides support for both the traditional `.ckpt` weight
checkpoint files as well as the HuggingFace `diffusers` format. This
introduces several changes you should know about.
1. The models.yaml format has been updated. There are now two
types of configuration stanza. The traditional ckpt
one will look like this, with a `format` of `ckpt` and a
`weights` field that points to the absolute or ROOTDIR-relative
location of the ckpt file.
```
inpainting-1.5:
description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
repo_id: runwayml/stable-diffusion-inpainting
format: ckpt
width: 512
height: 512
weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
config: configs/stable-diffusion/v1-inpainting-inference.yaml
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
```
A configuration stanza for a diffusers model hosted at HuggingFace will look like this,
with a `format` of `diffusers` and a `repo_id` that points to the
repository ID of the model on HuggingFace:
```
stable-diffusion-2.1:
description: Stable Diffusion version 2.1 diffusers model (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-1
format: diffusers
```
A configuration stanza for a diffusers model stored locally should
look like this, with a `format` of `diffusers`, but a `path` field
that points at the directory that contains `model_index.json`:
```
waifu-diffusion:
description: Latest waifu diffusion 1.4
format: diffusers
path: models/diffusers/hakurei-waifu-diffusion-1.4
```
2. In order of precedence, InvokeAI will now use HF_HOME, then
XDG_CACHE_HOME, then finally default to `ROOTDIR/models` to
store HuggingFace diffusers models.
Consequently, the format of the models directory has changed to
mimic the HuggingFace cache directory. When HF_HOME and XDG_CACHE_HOME
are not set, diffusers models are now automatically downloaded
and retrieved from the directory `ROOTDIR/models/diffusers`,
while other models are stored in the directory
`ROOTDIR/models/hub`. This organization is the same as that used
by HuggingFace for its cache management.
This allows you to share diffusers and ckpt model files easily with
other machine learning applications that use the HuggingFace
libraries. To do this, set the environment variable HF_HOME
before starting up InvokeAI to tell it what directory to
cache models in. To tell InvokeAI to use the standard HuggingFace
cache directory, you would set HF_HOME like this (Linux/Mac):
`export HF_HOME=~/.cache/huggingface`
Both HuggingFace and InvokeAI will fall back to the XDG_CACHE_HOME
environment variable if HF_HOME is not set; this path
takes precedence over `ROOTDIR/models` to allow for the same sharing
with other machine learning applications that use HuggingFace
libraries.
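As a minimal sketch restating the precedence above (Linux/Mac; paths are illustrative):

```bash
# 1. HF_HOME, if set, determines where diffusers models are cached
export HF_HOME=~/.cache/huggingface
# 2. otherwise XDG_CACHE_HOME is consulted
# export XDG_CACHE_HOME=~/.cache
# 3. with neither set, models land under ROOTDIR/models
python scripts/invoke.py
```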
3. If you upgrade to InvokeAI 2.3.* from an earlier version, there
will be a one-time migration from the old models directory format
to the new one. You will see a message about this the first time
you start `invoke.py`.
4. Both the front end and back end of the model manager have been
rewritten to accommodate diffusers. You can import models using
their local file path, using their URLs, or their HuggingFace
repo_ids. On the command line, all these syntaxes work:
```
!import_model stabilityai/stable-diffusion-2-1-base
!import_model /opt/sd-models/sd-1.4.ckpt
!import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```
**KNOWN BUGS (15 January 2023)**
1. On CUDA systems, the 768 pixel stable-diffusion-2.0 and
stable-diffusion-2.1 models can only be run as `diffusers` models
when the `xformers` library is installed and configured. Without
`xformers`, InvokeAI returns black images.
2. Inpainting and outpainting have regressed in quality.
Both these issues are being actively worked on.
## v2.2.4 <small>(11 December 2022)</small>
**the `invokeai` directory**
@@ -94,7 +196,7 @@ the desired release's zip file, which you can find by clicking on the green
This point release removes references to the binary installer from the
installation guide. The binary installer is not stable at the current
time. First time users are encouraged to use the "source" installer as
described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)
described in [Installing InvokeAI with the Source Installer](installation/deprecated_documentation/INSTALL_SOURCE.md)
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human facilitated
@@ -159,7 +261,7 @@ sections describe what's new for InvokeAI.
[Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
PIP-only installs. See
[Manual Installation](installation/INSTALL_MANUAL.md).
[Manual Installation](installation/020_INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
sampler, etc) in a `.invokeai` file. See
[Client](features/CLI.md)

Binary files not shown (ten new documentation image assets added, 20 KiB – 142 KiB).

View File

@@ -6,38 +6,51 @@ title: Command-Line Interface
## **Interactive Command Line Interface**
The `invoke.py` script, located in `scripts/`, provides an interactive interface
to image generation similar to the "invoke mothership" bot that Stable AI
provided on its Discord server.
The InvokeAI command line interface (CLI) provides scriptable access
to InvokeAI's features. Some advanced features are only available
through the CLI, though they eventually find their way into the WebUI.
Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) source
code repository, the time-consuming initialization of the AI model
happens only once. After that, image generation from the
command-line interface is very fast.
The CLI is accessible from the `invoke.sh`/`invoke.bat` launcher by
selecting option (1). Alternatively, it can be launched directly from
the command line by activating the InvokeAI environment and giving the
command:
```bash
invokeai
```
After some startup messages, you will be presented with the `invoke> `
prompt. Here you can type prompts to generate images and issue other
commands to load and manipulate generative models. The CLI has a large
number of command-line options that control its behavior. To get a
concise summary of the options, call `invokeai` with the `--help` argument:
```bash
invokeai --help
```
The script uses the readline library to allow for in-line editing, command
history (++up++ and ++down++), autocompletion, and more. To help keep track of
which prompts generated which images, the script writes a log file of image
names and prompts to the selected output directory.
In addition, as of version 1.02, it also writes the prompt into the PNG file's
metadata where it can be retrieved using `scripts/images2prompt.py`.
The script is confirmed to work on Linux, Windows and Mac systems.
!!! note
This script runs from the command-line or can be used as a Web application. The Web GUI is
currently rudimentary, but a much better replacement is on its way.
Here is a typical session
```bash
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
PS1:C:\Users\fred> invokeai
* Initializing, be patient...
Loading model from models/ldm/text2img-large/model.ckpt
(...more initialization messages...)
* Initialization done! Awaiting your command...
* Initializing, be patient...
>> Initialization file /home/lstein/invokeai/invokeai.init found. Loading...
>> Internet connectivity is True
>> InvokeAI, version 2.3.0-rc5
>> InvokeAI runtime directory is "/home/lstein/invokeai"
>> GFPGAN Initialized
>> CodeFormer Initialized
>> ESRGAN Initialized
>> Using device_type cuda
>> xformers memory-efficient attention is available and enabled
(...more initialization messages...)
* Initialization done! Awaiting your command (-h for help, 'q' to quit)
invoke> ashley judd riding a camel -n2 -s150
Outputs:
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
@@ -47,27 +60,15 @@ invoke> "there's a fly in my soup" -n6 -g
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
invoke> q
# this shows how to retrieve the prompt stored in the saved image's metadata
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
```
![invoke-py-demo](../assets/dream-py-demo.png)
The `invoke>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you
do). A significant change is that creation of individual images is now the
default unless `--grid` (`-g`) is given. A full list is given in
[List of prompt arguments](#list-of-prompt-arguments).
## Arguments
The script itself also recognizes a series of command-line switches that will
change important global defaults, such as the directory for image outputs and
the location of the model weight files.
The script recognizes a series of command-line switches that will
change important global defaults, such as the directory for image
outputs and the location of the model weight files.
### List of arguments recognized at the command line
@@ -82,10 +83,14 @@ overridden on a per-prompt basis (see
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--model <modelname>` | | `stable-diffusion-1.5` | Loads the initial model specified in configs/models.yaml. |
| `--ckpt_convert` | | `False` | If provided, both .ckpt and .safetensors files will be auto-converted into diffusers format in memory |
| `--autoconvert <path>` | | `None` | On startup, scan the indicated directory for new .ckpt/.safetensor files and automatically convert and import them |
| `--precision` | | `fp16` | Provide `fp32` for full precision mode, `fp16` for half-precision. `fp32` needed for Macintoshes and some NVidia cards. |
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
| `--patchmatch`, `--no-patchmatch` | | `--patchmatch` | Load/Don't load the PatchMatch inpainting extension |
| `--xformers`, `--no-xformers` | | `--xformers` | Load/Don't load the Xformers memory-efficient attention module (CUDA only) |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
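For example, this illustrative invocation combines several of the switches above to start the web server on all interfaces in full-precision mode:

```bash
invokeai --web --host 0.0.0.0 --port 9090 --precision fp32 --outdir ~/invokeai/outputs
```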
@@ -109,6 +114,7 @@ overridden on a per-prompt basis (see
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--full_precision` | | `False` | Same as `--precision=fp32`|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
@@ -136,7 +142,7 @@ mixture of both using any of the accepted command switch formats:
# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
# or renaming it and then running configure_invokeai.py again.
# or renaming it and then running invokeai-configure again.
# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
--root="/Users/mauwii/invokeai"
@@ -336,8 +342,10 @@ useful for debugging the text masking process prior to inpainting with the
### Model selection and importation
The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.
The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script. There are several
different model formats, each described in the [Model Installation
Guide](../installation/050_INSTALLING_MODELS.md).
#### `!models`
@@ -347,9 +355,9 @@ model is bold-faced
Example:
<pre>
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion not loaded Waifu Diffusion v1.3
inpainting-1.5 not loaded Stable Diffusion inpainting model
<b>stable-diffusion-1.5 active Stable Diffusion v1.5</b>
waifu-diffusion not loaded Waifu Diffusion v1.4
</pre>
#### `!switch <model>`
@@ -361,43 +369,30 @@ Note how the second column of the `!models` table changes to `cached` after a
model is first loaded, and that the long initialization step is not needed when
loading a cached model.
<pre>
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
waifu-diffusion active Waifu Diffusion v1.3
#### `!import_model <hugging_face_repo_ID>`
invoke> !switch waifu-diffusion
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Setting Sampler to k_lms
This imports and installs a `diffusers`-style model that is stored on
the [HuggingFace Web Site](https://huggingface.co). You can look up
any [Stable Diffusion diffusers
model](https://huggingface.co/models?library=diffusers) and install it
with a command like the following:
invoke> !models
laion400m not loaded <no description>
stable-diffusion-1.4 cached Stable Diffusion v1.4
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
```bash
!import_model prompthero/openjourney
```
invoke> !switch stable-diffusion-1.4
>> Caching model waifu-diffusion in system RAM
>> Retrieving model stable-diffusion-1.4 from system RAM cache
>> Setting Sampler to k_lms
#### `!import_model <path/to/diffusers/directory>`
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>
If you have a copy of a `diffusers`-style model saved to disk, you can
import it by passing the path to the model's top-level directory.
#### `!import_model <path/to/model/weights>`
#### `!import_model <url>`
For a `.ckpt` or `.safetensors` file, if you have a direct download
URL for the file, you can provide it to `!import_model` and the file
will be downloaded and installed for you.
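For example, using the PaperCut checkpoint URL shown earlier:

```bash
!import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
```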
#### `!import_model <path/to/model/weights.ckpt>`
This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
@@ -417,35 +412,12 @@ below, the bold-faced text shows what the user typed in with the exception of
the width, height and configuration file paths, which were filled in
automatically.
Example:

<pre>
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:

Name for this model: <b>waifu-diffusion</b>
Description of this model: <b>Waifu Diffusion v1.3</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
waifu-diffusion:
  config: configs/stable-diffusion/v1-inference.yaml
  description: Waifu Diffusion v1.3
  height: 512
  weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
  width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
   | LatentDiffusion: Running in eps-prediction mode
   | DiffusionWrapper has 859.52 M params.
   | Making attention of type 'vanilla' with 512 in_channels
   | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
   | Making attention of type 'vanilla' with 512 in_channels
   | Using faster float16 precision
invoke>
</pre>

#### `!import_model <path/to/directory_of_models>`

If you provide the path of a directory that contains one or more
`.ckpt` or `.safetensors` files, the CLI will scan the directory and
interactively offer to import the models it finds there. Also see the
`--autoconvert` command-line option.
#### `!edit_model <name_of_model>`
@@ -479,11 +451,6 @@ OK to import [n]? y
...
</pre>
### History processing
The CLI provides a series of convenient commands for reviewing previous actions,

View File

@@ -51,7 +51,7 @@ You can also combine styles and concepts:
If you used an installer to install InvokeAI, you may have already set a HuggingFace token.
If you skipped this step, you can:
- run the InvokeAI configuration script again (if you used a manual installer): `invokeai-configure`
- set one of the `HUGGINGFACE_TOKEN` or `HUGGING_FACE_HUB_TOKEN` environment variables to contain your token
Finally, if you already used any HuggingFace library on your computer, you might already have a token

View File

@@ -4,13 +4,24 @@ title: Image-to-Image
# :material-image-multiple: Image-to-Image
## `img2img`
Both the Web and command-line interfaces provide an "img2img" feature
that lets you seed your creations with an initial drawing or
photo. This is a really cool feature that tells stable diffusion to
build the prompt on top of the image you provide, preserving the
original's basic shape and layout.
See the [WebUI Guide](WEB.md) for a walkthrough of the img2img feature
in the InvokeAI web server. This document describes how to use img2img
in the command-line tool.
## Basic Usage
Launch the command-line client by running `invoke.sh`/`invoke.bat`
and choosing option (1). Alternatively, activate the InvokeAI
environment and issue the command `invokeai`.
Once the `invoke> ` prompt appears, you can start an img2img render by
pointing to a seed file with the `-I` option as shown here:
!!! example ""

View File

@@ -0,0 +1,76 @@
---
title: Model Merging
---
# :material-image-off: Model Merging
## How to Merge Models
As of version 2.3, InvokeAI comes with a script that allows you to
merge two or three diffusers-type models into a new merged model. The
resulting model will combine characteristics of the originals, and can
be used to teach an old model new tricks.
You may run the merge script by starting the invoke launcher
(`invoke.sh` or `invoke.bat`) and choosing the option for _merge
models_. This will launch a text-based interactive user interface that
prompts you to select the models to merge, how to merge them, and the
merged model name.
Alternatively you may activate InvokeAI's virtual environment from the
command line, and call the script via `invokeai-merge --gui` to open up
a version that has a nice graphical front end. To get the
command-line-only version, omit `--gui`.
The user interface for the text-based interactive script is
straightforward. It shows you a series of setting fields. Use control-N (^N)
to move to the next field, and control-P (^P) to move to the previous
one. You can also use TAB and shift-TAB to move forward and
backward. Once you are in a multiple choice field, use the up and down
cursor arrows to move to your desired selection, and press <SPACE> or
<ENTER> to select it. Change text fields by typing in them, and adjust
scrollbars using the left and right arrow keys.
Once you are happy with your settings, press the OK button. Note that
there may be two pages of settings, depending on the height of your
screen, and the OK button may be on the second page. Advance past the
last field of the first page to get to the second page, and reverse
this to get back.
If the merge runs successfully, it will create a new diffusers model
under the selected name and register it with InvokeAI.
## The Settings
* Model Selection -- there are three multiple choice fields that
display all the diffusers-style models that InvokeAI knows about.
If you do not see the model you are looking for, then it is probably
a legacy checkpoint model and needs to be converted using the
`invoke` command-line client and its `!optimize` command. You
must select at least two models to merge. The third can be left at
"None" if you desire.
* Alpha -- This is the ratio to use when combining models. It ranges
from 0 to 1. The higher the value, the more weight is given to the
second and (optionally) third models. So if you have two models named
"A" and "B", an alpha value of 0.25 will give you a merged model that
is 75% A and 25% B (see the sketch after this list).
* Interpolation Method -- This is the method used to combine
weights. The options are "weighted_sum" (the default), "sigmoid",
"inv_sigmoid" and "add_difference". Each produces slightly different
results. When three models are in use, only "add_difference" is
available. (TODO: cite a reference that describes what these
interpolation methods actually do and how to decide among them).
* Force -- Not all models are compatible with each other. The merge
script will check for compatibility and refuse to merge ones that
are incompatible. Set this checkbox to try merging anyway.
* Name for merged model - This is the name for the new model. Please
use InvokeAI conventions - only alphanumeric characters and the
characters ".+-".
## Caveats
This is a new script and may contain bugs.

View File

@@ -120,7 +120,7 @@ A number of caveats:
(`--iterations`) argument.
3. Your results will be _much_ better if you use the `inpaint-1.5` model
released by runwayML and installed by default by `invokeai-configure`.
This model was trained specifically to harmoniously fill in image gaps. The
standard model will work as well, but you may notice color discontinuities at
the border.

View File

@@ -28,11 +28,11 @@ should "just work" without further intervention. Simply pass the `--upscale`
the popup in the Web GUI.
**GFPGAN** requires a series of downloadable model files to work. These are
loaded when you run `invokeai-configure`. If GFPGAN is failing with an
error, please run the following from the InvokeAI directory:
```bash
invokeai-configure
```
If you do not run this script in advance, the GFPGAN module will attempt to
@@ -106,7 +106,7 @@ This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).
In order to setup CodeFormer to work, you need to download the models like with
GFPGAN. You can do this either by running `invokeai-configure` or by manually
downloading the
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to the `ldm/invoke/restoration/codeformer/weights` folder.

View File

@@ -239,28 +239,24 @@ Generate an image with a given prompt, record the seed of the image, and then
use the `prompt2prompt` syntax to substitute words in the original prompt for
words in a new prompt. This works for `img2img` as well.
- `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
- quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
- for single word substitutions parentheses are also optional:
`a cat.swap(dog) eating a hotdog`.
- Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely
corresponding to bloc97's `prompt_edit_spatial_start/_end` and
`prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
intuitively understand.
- Example usage:`a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end`
argument means that the "spatial" (self-attention) edit will stop having any
effect after 30% (=0.3) of the steps have been done, leaving Stable
Diffusion with 70% of the steps where it is free to decide for itself how to
reshape the cat-form into a dog form.
- The numbers represent a percentage through the step sequence where the edits
should happen. 0 means the start (noisy starting image), 1 is the end (final
image).
- For img2img, the step sequence does not start at 0 but instead at
(1-strength) - so if strength is 0.7, s_start and s_end must both be
greater than 0.3 (1-0.7) to have any effect.
- Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable
Diffusion should have to change the shape of the subject being swapped.
- `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
For example, consider the prompt `a cat.swap(dog) playing with a ball in the forest`. Normally, because of the way words interact with each other during stable diffusion image generation, these two prompts would generate different compositions:
- `a cat playing with a ball in the forest`
- `a dog playing with a ball in the forest`
| `a cat playing with a ball in the forest` | `a dog playing with a ball in the forest` |
| --- | --- |
| (image) | (image) |
- For multiple word swaps, use parentheses: `a (fluffy cat).swap(barking dog) playing with a ball in the forest`.
- To swap a comma, use quotes: `a ("fluffy, grey cat").swap("big, barking dog") playing with a ball in the forest`.
- Supports options `t_start` and `t_end` (each 0-1) loosely corresponding to bloc97's `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
intuitively understand. `t_start` and `t_end` are used to control on which steps cross-attention control should run. With the default values `t_start=0` and `t_end=1`, cross-attention control is active on every step of image generation. Other values can be used to turn cross-attention control off for part of the image generation process.
- For example, if doing a diffusion with 10 steps for the prompt is `a cat.swap(dog, t_start=0.3, t_end=1.0) playing with a ball in the forest`, the first 3 steps will be run as `a cat playing with a ball in the forest`, while the last 7 steps will run as `a dog playing with a ball in the forest`, but the pixels that represent `dog` will be locked to the pixels that would have represented `cat` if the `cat` prompt had been used instead.
- Conversely, for `a cat.swap(dog, t_start=0, t_end=0.7) playing with a ball in the forest`, the first 7 steps will run as `a dog playing with a ball in the forest` with the pixels that represent `dog` locked to the same pixels that would have represented `cat` if the `cat` prompt was being used instead. The final 3 steps will just run `a cat playing with a ball in the forest`.
> For img2img, the step sequence does not start at 0 but instead at `(1.0-strength)` - so if the img2img `strength` is `0.7`, `t_start` and `t_end` must both be greater than `0.3` (`1.0-0.7`) to have any effect.
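
The step arithmetic above can be made concrete with a small sketch.
This is an illustrative helper, not part of InvokeAI's API, and the
exact boundary handling in the real implementation may differ:

```python
def swap_active_steps(steps: int, t_start: float, t_end: float,
                      strength: float = 1.0) -> list:
    # Illustrative only: which 1-based steps run with the swapped prompt.
    # For img2img, the step sequence effectively begins at (1 - strength).
    begin = 1.0 - strength
    active = []
    for i in range(1, steps + 1):
        t = begin + ((i - 1) / steps) * strength  # fraction of the schedule at step i
        if t_start <= t < t_end:
            active.append(i)
    return active

print(swap_active_steps(10, 0.3, 1.0))  # -> [4, 5, 6, 7, 8, 9, 10]: the last 7 steps
print(swap_active_steps(10, 0.0, 0.7))  # -> [1, 2, 3, 4, 5, 6, 7]: the first 7 steps
```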
Prompt2prompt `.swap()` is not compatible with xformers, which will be temporarily disabled when doing a `.swap()` - so you should expect to use more VRAM and run slower than with xformers enabled.
The `prompt2prompt` code is based off
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).

View File

@@ -10,83 +10,260 @@ You may personalize the generated images to provide your own styles or objects
by training a new LDM checkpoint and introducing a new vocabulary to the fixed
model as a (.pt) embeddings file. Alternatively, you may use or train
HuggingFace Concepts embeddings files (.bin) from
<https://huggingface.co/sd-concepts-library> and its associated
notebooks.
## **Hardware and Software Requirements**
You will need a GPU to perform training in a reasonable length of
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
library](../installation/070_INSTALL_XFORMERS) to accelerate the
training process further. During training, about 8 GB of space is
temporarily needed in order to store intermediate models, checkpoints
and logs.
## **Preparing for Training**
To train, prepare a folder that contains 3-5 images that illustrate
the object or concept. It is good to provide a variety of examples or
poses to avoid overtraining the system. Format these images as PNG
(preferred) or JPG. You do not need to resize or crop the images in
advance, but for more control you may wish to do so.
Place the training images in a directory on the machine InvokeAI runs
on. We recommend placing them in a subdirectory of the
`text-inversion-training-data` folder located in the InvokeAI root
directory, ordinarily `~/invokeai` (Linux/Mac), or
`C:\Users\your_name\invokeai` (Windows). For example, to create an
embedding for the "psychedelic" style, you'd place the training images
into the directory
`~/invokeai/text-inversion-training-data/psychedelic`.
## **Launching Training Using the Console Front End**
InvokeAI 2.3 and higher comes with a text console-based training front
end. From within the `invoke.sh`/`invoke.bat` Invoke launcher script,
start the front end by selecting choice (3):
```sh
Do you want to generate images using the
1. command-line
2. browser-based UI
3. textual inversion training
4. open the developer console
Please enter 1, 2, 3, or 4: [1] 3
```
From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command `invokeai-ti --gui`.
This will launch a text-based front end that will look like this:
<figure markdown>
![ti-frontend](../assets/textual-inversion/ti-frontend.png)
</figure>
!!! note
The interface is keyboard-based. Move from field to field using
control-N (^N) to move to the next field and control-P (^P) to the
previous one. <Tab> and <shift-TAB> work as well. Once a field is
active, use the cursor keys. In a checkbox group, use the up and down
cursor keys to move from choice to choice, and <space> to select a
choice. In a scrollbar, use the left and right cursor keys to increase
and decrease the value of the scroll. In textfields, type the desired
values.
According to the associated paper, the optimal number of
images is 3-5. Your model may not converge if you use more images than
that.
The number of parameters may look intimidating, but in most cases the
predefined defaults work fine. The red circled fields in the above
illustration are the ones you will adjust most frequently.
### Model Name
This will list all the diffusers models that are currently
installed. Select the one you wish to use as the basis for your
embedding. Be aware that if you use a SD-1.X-based model for your
training, you will only be able to use this embedding with other
SD-1.X-based models. Similarly, if you train on SD-2.X, you will only
be able to use the embeddings with models based on SD-2.X.
### Trigger Term
This is the prompt term you will use to trigger the embedding. Type a
single word or phrase you wish to use as the trigger, example
"psychedelic" (without angle brackets). Within InvokeAI, you will then
be able to activate the trigger using the syntax `<psychedelic>`.
### Initializer
This is a single character that is used internally during the training
process as a placeholder for the trigger term. It defaults to "*" and
can usually be left alone.
### Resume from last saved checkpoint
As training proceeds, textual inversion will write a series of
intermediate files that can be used to resume training from where it
was left off in the case of an interruption. This checkbox will be
automatically selected if you provide a previously used trigger term
and at least one checkpoint file is found on disk.
Note that as of 20 January 2023, resume does not seem to be working
properly due to an issue with the upstream code.
### Data Training Directory
This is the location of the images to be used for training. When you
select a trigger term like "my-trigger", the frontend will prepopulate
this field with `~/invokeai/text-inversion-training-data/my-trigger`,
but you can change the path to wherever you want.
### Output Destination Directory
This is the location of the logs, checkpoint files, and embedding
files created during training. When you select a trigger term like
"my-trigger", the frontend will prepopulate this field with
`~/invokeai/text-inversion-output/my-trigger`, but you can change the
path to wherever you want.
### Image resolution
The images in the training directory will be automatically scaled to
the value you use here. For best results, you will want to use the
same default resolution as the underlying model (512 pixels for
SD-1.5, 768 for the larger version of SD-2.1).
### Center crop images
If this is selected, your images will be center cropped to make them
square before resizing them to the desired resolution. Center cropping
can indiscriminately cut off the top of subjects' heads for portrait
aspect images, so if you have images like this, you may wish to use a
photoeditor to manually crop them to a square aspect ratio.
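
For reference, a rough Python equivalent of the scaling and cropping
applied to each training image might look like the following. This is
a sketch, not InvokeAI's actual code:

```python
from PIL import Image

def prepare_training_image(path: str, resolution: int = 512,
                           center_crop: bool = True) -> Image.Image:
    # Optionally center-crop to a square, then resize to the target resolution.
    img = Image.open(path).convert("RGB")
    if center_crop:
        side = min(img.size)
        left = (img.width - side) // 2
        top = (img.height - side) // 2
        img = img.crop((left, top, left + side, top + side))
    return img.resize((resolution, resolution), Image.LANCZOS)
```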
### Mixed precision
Select the floating point precision for the embedding. "no" will
result in a full 32-bit precision, "fp16" will provide 16-bit
precision, and "bf16" will provide mixed precision (only available
when XFormers is used).
### Max training steps
How many steps the training will take before the model converges. Most
training sets will converge with 2000-3000 steps.
### Batch size
This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
quickly, but use more memory. The default size will run on GPUs with
as little as 12 GB of VRAM.
### Learning rate
The rate at which the system adjusts its internal weights during
training. Higher values risk overtraining (getting the same image each
time), and lower values will take more steps to train a good
model. The default of 0.0005 is conservative; you may wish to increase
it to 0.005 to speed up training.
### Scale learning rate by number of GPUs, steps and batch size
If this is selected (the default) the system will adjust the provided
learning rate to improve performance.
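
The scaling rule multiplies the base rate by the effective batch size.
This is an assumption based on the HuggingFace `diffusers` textual
inversion example that this front end is built on; check the source for
the exact formula in your version:

```python
# All names correspond to the training settings described above.
effective_lr = (learning_rate
                * train_batch_size
                * gradient_accumulation_steps
                * num_gpus)  # number of training processes
```

With the values shown in the command-line example further below (batch
size 8, gradient accumulation 4, one GPU), a base rate of 0.0005
becomes 0.016.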
### Use xformers acceleration
This will activate XFormers memory-efficient attention. You need to
have XFormers installed for this to have an effect.
### Learning rate scheduler
This adjusts how the learning rate changes over the course of
training. The default "constant" means to use a constant learning rate
for the entire training session. The other values scale the learning
rate according to various formulas.
Only "constant" is supported by the XFormers library.
### Gradient accumulation steps
This is a parameter that allows you to use bigger batch sizes than
your GPU's VRAM would ordinarily accommodate, at the cost of some
performance.
### Warmup steps
If "constant_with_warmup" is selected in the learning rate scheduler,
then this provides the number of warmup steps. Warmup steps have a
very low learning rate, and are one way of preventing early
overtraining.
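
As a sketch, the "constant_with_warmup" schedule ramps the learning
rate linearly from zero and then holds it constant, assuming the
standard `diffusers` scheduler semantics:

```python
def lr_at_step(step: int, base_lr: float, warmup_steps: int) -> float:
    # Linear ramp during warmup, constant afterwards.
    if warmup_steps > 0 and step < warmup_steps:
        return base_lr * step / warmup_steps
    return base_lr
```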
## The training run
Start the training run by advancing to the OK button (bottom right)
and pressing <enter>. A series of progress messages will be displayed
as the training process proceeds. This may take an hour or two,
depending on settings and the speed of your system. Various log and
checkpoint files will be written into the output directory (ordinarily
`~/invokeai/text-inversion-output/my-model/`).
At the end of successful training, the system will copy the file
`learned_embeds.bin` into the InvokeAI root directory's `embeddings`
directory, using a subdirectory named after the trigger token. For
example, if the trigger token was `psychedelic`, then look for the
embeddings file in
`~/invokeai/embeddings/psychedelic/learned_embeds.bin`.
You may now launch InvokeAI and try out a prompt that uses the trigger
term. For example `a plate of banana sushi in <psychedelic> style`.
## **Training with the Command-Line Script**
Training can also be done using a traditional command-line script. It
can be launched from within the "developer's console", or from the
command line after activating InvokeAI's virtual environment.
It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:
```sh
invokeai-ti --help
```
Typical usage is shown here:
```sh
invokeai-ti \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=style \
--initializer_token='*' \
--placeholder_token='<psychedelic>' \
--train_data_dir=/home/lstein/invokeai/training-data/psychedelic \
--output_dir=/home/lstein/invokeai/text-inversion-training/psychedelic \
--scale_lr \
--train_batch_size=8 \
--gradient_accumulation_steps=4 \
--max_train_steps=3000 \
--learning_rate=0.0005 \
--resume_from_checkpoint=latest \
--lr_scheduler=constant \
--mixed_precision=fp16 \
--only_save_embeds
```
## Reading
For more information on textual inversion, please see the following
resources:
* The [textual inversion repository](https://github.com/rinongal/textual_inversion) and
associated paper for details and limitations.
* [HuggingFace's textual inversion training
page](https://huggingface.co/docs/diffusers/training/text_inversion)
* [HuggingFace example script
documentation](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
(Note that this script is similar to, but not identical to,
`textual_inversion`, and produces embedding files that are completely
compatible.)
---
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team

View File

@@ -5,11 +5,14 @@ title: InvokeAI Web Server
# :material-web: InvokeAI Web Server
As of version 2.0.0, this distribution comes with a full-featured web server
(see screenshot).

To use it, launch the `invoke.sh`/`invoke.bat` script and select
option (2). Alternatively, with the InvokeAI environment active, run
the `invokeai` script with the `--web` option:
```bash
invokeai --web
```
You can then connect to the server by pointing your web browser at
@@ -19,17 +22,23 @@ address of the host you are running it on, or the wildcard `0.0.0.0`. For
example:
```bash
invoke.sh --host 0.0.0.0
```

or

```bash
invokeai --web --host 0.0.0.0
```

## Quick guided walkthrough of the WebUI's features

While most of the WebUI's features are intuitive, here is a guided walkthrough
through its various components.
![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}
The screenshot above shows the Text to Image tab of the WebUI. There are three
main sections:
1. A **control panel** on the left, which contains various settings for text to
@@ -63,12 +72,14 @@ From top to bottom, these are:
1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph)
generate a new one, modified by the text prompt
3. Unified Canvas - Interactively combine multiple images, extend them
with outpainting, and modify interior portions of the image with
inpainting; erase portions of a starting image and have the AI fill in
the erased region from a text prompt.
4. Workflow Management (not yet implemented) - this panel will allow you to create
pipelines of common operations and combine them into workflows.
5. Training (not yet implemented) - this panel will provide an interface to [textual
inversion training](TEXTUAL_INVERSION.md) and fine tuning.
The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already be accessed
@@ -76,18 +87,18 @@ through the Text to Image and Image to Image tabs.
## Walkthrough
The following walkthrough will exercise most (but not all) of the WebUI's
feature set.
### Text to Image
1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
with your browser by accessing `http://localhost:9090`. If the browser and
server are running on different machines on your LAN, add the option
`--host 0.0.0.0` to the launch command line and connect to the machine
hosting the web server using its IP address or domain name.
2. If all goes well, the WebUI should come up and you'll see a green
`connected` message on the upper right.
#### Basics
@@ -234,7 +245,7 @@ walkthrough.
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
the blank area to get an upload dialog. The image will load into an area
marked _Initial Image_. (The WebUI will also load the most
recently-generated image from the gallery into a section on the left, but
this image will be replaced in the next step.)
@@ -284,13 +295,17 @@ initial image" icons are located.
![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}
### Unified Canvas
See the [Unified Canvas Guide](UNIFIED_CANVAS.md)
## Parting remarks
This concludes the walkthrough, but there are several more features that you can
explore. Please check out the [Command Line Interface](CLI.md) documentation for
further explanation of the advanced features that were not covered here.
The WebUI is under rapid development. Check back regularly for updates!
## Reference

View File

@@ -2,4 +2,62 @@
title: Overview
---
Here you can find the documentation for InvokeAI's various features.
## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.
### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
### * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
### * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.
### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.
### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.
## Model Management
### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.
### * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
### * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
## Other Features
### * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
### * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!

View File

@@ -81,22 +81,6 @@ Q&A</a>]
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster.
## :fontawesome-solid-computer: Hardware Requirements
### :octicons-cpu-24: System
@@ -116,139 +100,146 @@ images in full-precision mode:
- GTX 1650 series cards
- GTX 1660 series cards
### :fontawesome-solid-memory: Memory and Disk
- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python, and
all its dependencies.
## :octicons-package-dependencies-24: Installation
This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
### [Installation Getting Started Guide](installation)
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
This method is recommended for 1st time users
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
This method is recommended for experienced users and developers
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
This method is recommended for those familiar with running Docker containers
### Other Installation Guides
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
## :octicons-gift-24: InvokeAI Features
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
### The InvokeAI Command Line Interface
- [Command Line Interface Reference Guide](features/CLI.md)
<!-- separator -->
### Image Management
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)
<!-- separator -->
### Model Management
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
<!-- separator -->
### Prompt Engineering
- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
## :octicons-log-16: Latest Changes
### v2.3.0 <small>(9 February 2023)</small>
#### Migration to Stable Diffusion `diffusers` models
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
The WebGUI offers similar functionality for model management.
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
#### Support for the `XFormers` Memory-Efficient Crossattention Package
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the `xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion model files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
#### A Negative Prompt Box in the WebUI
There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
#### Model Merging
Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
#### Textual Inversion Training
Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, the subject or style will be activated by including `<pointillist-style>` in your prompt.
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
#### A New Installer Experience
The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
#### Command-line name changes
All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can all be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
* `invokeai` -- Command-line client
* `invokeai --web` -- Web GUI
* `invokeai-merge --gui` -- Model merging script with graphical front end
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
Developers should be aware that the locations of the scripts' source code have been moved. The new locations are:
* `invokeai` => `ldm/invoke/CLI.py`
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
* `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting
Please check out our **[:material-frequently-asked-questions:
Troubleshooting
Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to
get solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing
@@ -274,8 +265,8 @@ thank them for their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.
Original portions of the software are Copyright (c) 2022-23
by [The InvokeAI Team](https://github.com/invoke-ai).
## :octicons-book-24: Further Reading

View File

@@ -6,57 +6,106 @@ title: Installing with the Automated Installer
## Introduction
The automated installer is a Python script that automates the steps
needed to install and run InvokeAI on a stock computer running recent
versions of Linux, MacOS or Windows. It will leave you with a version
that runs a stable version of InvokeAI with the option to upgrade to
experimental versions later.
## Walk through
1. <a name="hardware_requirements">**Hardware Requirements**: </a>Make sure that your system meets the [hardware
requirements](../index.md#hardware-requirements) and has the
appropriate GPU drivers installed. For a system with an NVIDIA
card installed, you will need to install the CUDA driver, while
AMD-based cards require the ROCm driver. In most cases, if you've
already used the system for gaming or other graphics-intensive
tasks, the appropriate drivers will already be installed. If
unsure, check the [GPU Driver Guide](030_INSTALL_CUDA_AND_ROCM.md)
!!! info "Required Space"
Installation requires roughly 18G of free disk space to load
the libraries and recommended model weights files.
Regardless of your destination disk, your *system drive*
(`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB
of free disk space to download and cache python
dependencies.
NOTE for Linux users: if your temporary directory is mounted
as a `tmpfs`, ensure it has sufficient space.
!!! warning "If you see an older version, or get a command not found error"
2. <a name="software_requirements">**Software Requirements**: </a>Check that your system has an up-to-date Python installed. To do
this, open up a command-line window ("Terminal" on Linux and
Macintosh, "Command" or "Powershell" on Windows) and type `python
--version`. If Python is installed, it will print out the version
number. If it is version `3.9.1` or `3.10.x`, you meet
requirements.
!!! warning "What to do if you have an unsupported version"
Go to [Python Downloads](https://www.python.org/downloads/)
and download the appropriate installer package for your
platform. We recommend [Version
3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI. At this time
we do not recommend Python 3.11.
_Please select your platform in the section below for platform-specific
setup requirements._
=== "Windows users"
=== "Windows"
During the Python configuration process, look out for a
checkbox to add Python to your PATH and select it. If the
install script complains that it can't find python, then open
the Python installer again and choose "Modify" existing
installation.
Installation requires an up to date version of the Microsoft
Visual C libraries. Please install the 2015-2022 libraries
available here:
https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
Please double-click on the file `WinLongPathsEnabled.reg` and
accept the dialog box that asks you if you wish to modify your registry.
This activates long filename support on your system and will prevent
mysterious errors during installation.
=== "Mac users"
=== "Linux"
To install an appropriate version of Python on Ubuntu 22.04
and higher, run the following:
```
sudo apt update
sudo apt install -y python3 python3-pip python3-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
On Ubuntu 20.04, the process is slightly different:
```
sudo apt update
sudo apt install -y software-properties-common
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt install python3.10 python3-pip python3.10-venv
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
```
Both `python` and `python3` commands are now pointing at
Python 3.10. You can still access older versions of Python by
calling `python2`, `python3.8`, etc.
Linux systems require a couple of additional graphics
libraries to be installed for proper functioning of
`python3-opencv`. Please run the following:
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
=== "Mac"
After installing Python, you may need to run the
following command from the Terminal in order to install the Web
certificates needed to download model data from https sites. If
you see lots of CERTIFICATE ERRORS during the last part of the
install, this command will fix the problem:
`/Applications/Python\ 3.10/Install\ Certificates.command`
You may need to install the Xcode command line tools. These
are a set of tools that are needed to run certain applications in a
Terminal, including InvokeAI. This package is provided
directly by Apple. To install, open a terminal window and run `xcode-select --install`. You will get a macOS system popup guiding you through the
install. If you already have them installed, you will instead see some
output in the Terminal advising you that the tools are already installed. More information can be found at [FreeCode Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
3. **Download the Installer**: The InvokeAI installer is distributed as a ZIP file. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
and look for a file named:
- InvokeAI-installer-v2.X.X.zip
=== "Linux users"
where "2.X.X" is the latest released version. The file is located
at the very bottom of the release page, under **Assets**.
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
directory named "InvokeAI-Installer". When unpacked, the directory
will look like this:
<figure markdown>
![zipfile-screenshot](../assets/installer-walkthrough/unpacked-zipfile.png)
</figure>
5. **Launch the installer script from the desktop**: If you are using a desktop GUI, double-click the installer file
appropriate for your platform. It will be named `install.bat` on
Windows systems and `install.sh` on Linux and Macintosh
systems. Be aware that your system's file browser may suppress the
display of the file extension.
On Windows systems you may get an "Untrusted Publisher" warning.
Click on "More Info" and then select "Run Anyway." You trust us, right?
After successful installation, you can delete the `InvokeAI-Installer`
directory.
6. **[Alternative] Launch the installer script from the command line**: From the command line, run the shell script or .bat file:
```cmd
C:\Documents\Linco> cd InvokeAI-Installer
C:\Documents\Linco\InvokeAI-Installer> install.bat
```
7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
directory with at least 18G of free space for a full install. InvokeAI and
all its support files will be installed into a new directory named
`invokeai` located at the location you specify.
<figure markdown>
![confirm-install-directory-screenshot](../assets/installer-walkthrough/confirm-directory.png)
</figure>
- The default is to install the `invokeai` directory in your home directory,
usually `C:\Users\YourName\invokeai` on Windows systems,
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
on Macintoshes.
- The script uses tab autocompletion to suggest directory path completions.
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
to suggest completions.
8. **Select your GPU**: The installer will autodetect your platform and will request you to
confirm the type of GPU your graphics card has. On Linux systems,
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
or CPU (no graphics acceleration). On Windows, you'll have the
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
you select CPU on M1 or M2 Macintoshes, you will get MPS-based
graphics acceleration without installing additional drivers. If you
are unsure what GPU you are using, you can ask the installer to
guess.
<figure markdown>
![choose-gpu-screenshot](../assets/installer-walkthrough/choose-gpu.png)
</figure>
9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI and the application itself.
Be aware that some of the library download and install steps take a long
time. In particular, the `pytorch` package is quite large and often appears
to get "stuck" at 99.9%. Have patience. If you have been waiting for more than ten
minutes and nothing is happening, you can interrupt the script with ^C. You
may restart it and it will pick up where it left off.
10. **Post-install Configuration**: After installation completes, the installer will launch the
configuration script, which will guide you through the first-time
process of selecting one or more Stable Diffusion model weights
files, downloading and configuring them. We provide a list of
popular models that InvokeAI performs well with. However, you can
add more weight files later on using the command-line client or
the Web UI. See [Installing Models](050_INSTALLING_MODELS.md) for
details.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.
<figure markdown>
![downloading-models-screenshot](../assets/installer-walkthrough/downloading-models.png)
</figure>
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
11. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
for the directory `invokeai` installed in the location you chose at the
beginning of the install session. Look for a shell script named `invoke.sh`
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
it or typing its name at the command-line:

```cmd
C:\Documents\Linco> cd invokeai
C:\Documents\Linco\invokeAI> invoke.bat
```
- The `invoke.bat` (`invoke.sh`) script will give you the choice
of starting (1) the command-line interface, (2) the web GUI, (3)
textual inversion training, and (4) model merging.
- By default, the script will launch the web interface. When you
do this, you'll see a series of startup messages ending with
instructions to point your browser at
http://localhost:9090. Click on this link to open up a browser
and start exploring InvokeAI's features.
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
customize its behavior. For example, you can change the location of the
image output directory, or select your favorite sampler. See the
[Command-Line Interface](../features/CLI.md) for a full list of the options.
- To set defaults that will take effect every time you launch InvokeAI,
use a text editor (e.g. Notepad) to edit the file
`invokeai\invokeai.init`. It contains a variety of examples that you can
follow to add and modify launch options. A sample is sketched after this list.
- The launcher script also offers you an option labeled "open the developer
console". If you choose this option, you will be dropped into a
command-line interface in which you can run python commands directly,
access developer tools, and launch InvokeAI with customized options.
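For illustration, a minimal `invokeai.init` might contain lines like the ones below. This is a sketch rather than the generated file, and the two options shown are simply the ones used elsewhere in this guide; the [Command-Line Interface](../features/CLI.md) page lists the full set of options:

```bash
# invokeai.init -- one launch option per line
--web
--host 0.0.0.0
```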
!!! warning "Do not move or remove the `invokeai` directory"
The `invokeai` directory contains the `invokeai` application, its
configuration files, the model weight files, and outputs of image generation.
Once InvokeAI is installed, do not move or remove this directory."
!!! warning "The `invokeai` directory contains the `invoke` application, its
configuration files, the model weight files, and outputs of image generation.
Once InvokeAI is installed, do not move or remove this directory."
## Troubleshooting
### _Package dependency conflicts_
If you have previously installed InvokeAI or another Stable Diffusion
package, the installer may occasionally pick up outdated libraries and
either the installer or `invoke` will fail with complaints about
library conflicts. In this case, run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then give this command:

`pip install InvokeAI --force-reinstall`

This should fix the issues.

### ldm from pypi

!!! warning

Some users have tried to correct dependency problems by installing
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
`ldm` will make matters worse. If you've installed `ldm`, uninstall it with
`pip uninstall ldm`.
### InvokeAI runs extremely slowly on Linux or Windows systems
The most frequent cause of this problem is when the installation
process installed the CPU-only version of the torch machine-learning
library, rather than a version that takes advantage of GPU
acceleration. To confirm this issue, look at the InvokeAI startup
messages. If you see a message saying ">> Using device CPU", then
this is what happened.
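You can also ask torch directly from the developer's console (described below) whether GPU acceleration is available. This one-liner is a quick check; on a working CUDA install it prints `True`:

```bash
python -c "import torch; print(torch.cuda.is_available())"
```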
To fix this problem, first determine whether you have an NVidia or an
AMD GPU. The former uses the CUDA driver, and the latter uses ROCm
(only available on Linux). Then run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then type the following commands:
=== "NVIDIA System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
pip install xformers
```
=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
### Corrupted configuration file
Everything seems to install ok, but `invokeai` complains of a corrupted
configuration file and goes back into the configuration process (asking you to
download models, etc), but this doesn't fix the problem.
This issue is often caused by a misconfigured configuration directive in the
`invokeai\invokeai.init` initialization file that contains startup settings. The
easiest way to fix the problem is to move the file out of the way and re-run
`invokeai-configure`. Enter the developer's console (option 3 of the launcher
script) and run this command:
```cmd
invokeai-configure --root=.
```
Note the dot (.) after `--root`. It is part of the command.
If this does not fix the problem, you can report the issue in
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.
### Other Problems
If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or ask for help on our
[Discord Server](https://discord.gg/ZmtBAhwWhy). Somebody will usually respond within 24
hours, and often much sooner.
## Updating to newer versions
This distribution is changing rapidly, and we add new features
regularly. Releases are announced at
http://github.com/invoke-ai/InvokeAI/releases, and at
https://pypi.org/project/InvokeAI/. To update to the latest released
version (recommended), follow these steps:
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
`invokeai` root directory.
2. Choose menu item (6) "Developer's Console". This will launch a new
command line.
3. Type the following command:
```bash
pip install InvokeAI --upgrade
```
4. Watch the installation run. Once it is complete, you may exit the
command line by typing `exit`, and then start InvokeAI from the
launch script as per usual.
Alternatively, if you wish to get the most recent unreleased
development version, perform the same steps to enter the developer's
console, and then type:
```bash
pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
```
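You can also pin a specific release instead of taking the latest one. This is a sketch; substitute whichever released version you want:

```bash
pip install "InvokeAI==2.3.0"
```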
---
title: Installing Manually
---
<figure markdown>
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
</figure>
!!! warning "This is for advanced Users"
who are already experienced with using conda or pip
**python experience is mandatory**
## Introduction
!!! tip "Conda"
As of InvokeAI v2.3.0 installation using the `conda` package manager is no longer being supported. It will likely still work, but we are not testing this installation method.
On Windows systems, you are encouraged to install and use the
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.
## pip Install
### Prerequisites
Before you start, make sure you have the following prerequisites
installed. These are described in more detail in [Automated
Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming). A quick way to verify several of them is sketched after this list:
* **Python** version 3.9 or 3.10 (3.11 is not recommended).
* **CUDA Tools** For those with _NVidia GPUs_, you will need to
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
InvokeAI does not support AMD GPUs on Windows systems due to
lack of a Windows ROCm library.
* **Visual C++ Libraries** _Windows users_ must install the free
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
* **The Xcode command line tools** for _Macintosh users_. Instructions are
available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
* _Macintosh users_ may also need to run the `Install Certificates` command
if model downloads give lots of certificate errors. Run:
`/Applications/Python\ 3.10/Install\ Certificates.command`
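As a quick sanity check, the following commands cover several of these prerequisites at once (a sketch; the GPU commands only apply to the matching hardware and will report "command not found" otherwise):

```bash
python -V      # expect 3.9.x or 3.10.x
nvidia-smi     # NVidia GPUs: prints a status table for your card
rocm-smi       # AMD GPUs on Linux: prints the ROCm System Management Interface table
```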
### Installation Walkthrough
To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
procedure depends on this and will not work with other versions:
```bash
python -V
```
2. Create a directory to contain your InvokeAI library, configuration
files, and models. This is known as the "runtime" or "root"
directory, and often lives in your home directory under the name `invokeai`.
Please keep in mind the disk space requirements - you will need at
least 20GB for the models and the virtual environment. From now
on we will refer to this directory as `INVOKEAI_ROOT`. For convenience,
the steps below create a shell variable of that name which contains the
path to `$HOME/invokeai`.
=== "Linux/Mac"
```bash
export INVOKEAI_ROOT="~/invokeai"
mkdir $INVOKEAI_ROOT
```
=== "Windows (Powershell)"
```bash
Set-Variable -Name INVOKEAI_ROOT -Value $Home/invokeai
mkdir $INVOKEAI_ROOT
```
3. Enter the root (invokeai) directory and create a virtual Python
environment within it named `.venv`. If the command `python`
doesn't work, try `python3`. Note that while you may create the
virtual environment anywhere in the file system, we recommend that
you create it within the root directory as shown here. This makes
it possible for the InvokeAI applications to find the model data
and configuration. If you do not choose to install the virtual
environment inside the root directory, then you **must** set the
`INVOKEAI_ROOT` environment variable in your shell environment, for
example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.
=== "Linux/Mac"
```bash
cd $INVOKEAI_ROOT
python -m venv .venv
```
=== "Windows"
```bash
cd $INVOKEAI_ROOT
python -m venv .venv
```
4. Activate the new environment:
=== "Linux/Mac"
```bash
source .venv/bin/activate
```
=== "Windows"
```bash
.venv\Scripts\activate
```
If you get a permissions error at this point, run the command
`Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
and try `activate` again.
The command-line prompt should change to show `(.venv)` at the
beginning of the prompt. Note that all the following steps should be
run while inside the INVOKEAI_ROOT directory.
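To double-check that the virtual environment's interpreter is the one in use, you can ask the shell where `python` resolves (a quick sketch; on Windows, use `where python` instead):

```bash
which python   # should print a path inside $INVOKEAI_ROOT/.venv
```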
5. Make sure that pip is installed in your virtual environment and up to date:
```bash
python -m pip install --upgrade pip
```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
=== "CUDA (NVidia)"
```bash
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
7. Deactivate and reactivate your virtual environment so that the invokeai-specific commands
become available in the environment:
=== "Linux/Macintosh"
```bash
deactivate && source .venv/bin/activate
```
=== "Windows"
```bash
deactivate
.venv\Scripts\activate
```
8. Set up the runtime directory
In this step you will initialize your runtime directory with the downloaded
models, model config files, directory for textual inversion embeddings, and
your outputs.
```bash
invokeai-configure
```
The script `invokeai-configure` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need
to take to create an account on the official site that hosts the weights
files, accept the agreement, and provide an access token that allows
InvokeAI to legally download and install the weights files.
If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.
!!! tip
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
9. Run the command-line interface or the web interface:
From within INVOKEAI_ROOT, activate the environment
(with `source .venv/bin/activate` or `.venv\Scripts\activate`), and then run
the script `invokeai`. If the virtual environment you selected is NOT inside
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
`--root_dir \path\to\invokeai` to the commands below:
!!! example ""
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
=== "CLI"
```bash
invokeai
```
=== "local Webserver"
```bash
invokeai --web
```
=== "Public Webserver"
```bash
invokeai --web --host 0.0.0.0
```
If you choose to run the web interface, point your browser at
http://localhost:9090 in order to load the GUI.
!!! tip
You can permanently set the location of the runtime directory
by setting the environment variable `INVOKEAI_ROOT` to the
path of the directory. As mentioned previously, this is
**highly recommended** if your virtual environment is located outside of
your runtime directory.
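For example, on Linux or macOS you might append a line like this to `~/.bashrc` or `~/.zshrc` (a sketch, assuming you chose `~/invokeai` as the runtime directory; on Windows, `setx` accomplishes the same thing):

```bash
# make the runtime directory location available in all future shells
export INVOKEAI_ROOT=~/invokeai
# Windows equivalent, run once: setx INVOKEAI_ROOT "%USERPROFILE%\invokeai"
```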
10. Render away!
Browse the [features](../features/CLI.md) section to learn about all the
things you can do with InvokeAI.
Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm up period
rendering will be fast.
11. Subsequently, to relaunch the script, be sure to run "conda activate
invokeai", enter the `InvokeAI` directory, and then launch the invoke
script. If you forget to activate the 'invokeai' environment, the script
will fail with multiple `ModuleNotFound` errors.
11. Subsequently, to relaunch the script, activate the virtual environment, and
then launch the `invokeai` command. If you forget to activate the virtual
environment you will most likely receive a `command not found` error.
## Creating an "install" version of InvokeAI
!!! warning
Do not move the runtime directory after installation. The virtual environment will get confused if the directory is moved.
12. Other scripts
The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be launched with the command:
```bash
invokeai-ti --gui
```
Similarly, the [Model Merging](../features/MODEL_MERGING.md) script can be launched with the command:
```bash
invokeai-merge --gui
```
Leave off the `--gui` option to run the script using command-line arguments. Pass the `--help` argument
to get usage instructions.
### Developer Install
If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
code for InvokeAI. For this to work, you will need to install the
`git` source code management program. If it is not already installed
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git).
1. From the command line, run this command:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.
2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
3. Enter the InvokeAI repository directory and run one of these
commands, based on your GPU:
=== "CUDA (NVidia)"
```bash
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
Be sure to pass `-e` (for an editable install) and don't forget the
dot ("."). It is part of the command.
You can now run `invokeai` and its related commands. The code will be
read from the repository, so that you can edit the .py source files
and watch the code's behavior change.
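To confirm that pip registered the editable install against your checkout, `pip show` can help. This is a sketch, and the exact field names vary between pip versions:

```bash
pip show InvokeAI   # the Location / "Editable project location" field should point at your clone
```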
4. If you wish to contribute to the InvokeAI project, you are
encouraged to establish a GitHub account and "fork"
https://github.com/invoke-ai/InvokeAI into your own copy of the
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.
Please see [Contributing](/index.md#Contributing) for hints
on getting started.
---
title: NVIDIA Cuda / AMD ROCm
---
<figure markdown>
# :simple-nvidia: CUDA | :simple-amd: ROCm
</figure>
In order for InvokeAI to run at full speed, you will need a graphics
card with a supported GPU. InvokeAI supports NVidia cards via the CUDA
driver on Windows and Linux, and AMD cards via the ROCm driver on Linux.
## :simple-nvidia: CUDA
### Linux and Windows Install
If you have used your system for other graphics-intensive tasks, such
as gaming, you may very well already have the CUDA drivers
installed. To confirm, open up a command-line window and type:
```
nvidia-smi
```
If this command produces a status report on the GPU(s) installed on
your system, CUDA is installed and you have no more work to do. If
instead you get "command not found", or similar, then the driver will
need to be installed.
We strongly recommend that you install the CUDA Toolkit package
directly from NVIDIA. **Do not try to install Ubuntu's
nvidia-cuda-toolkit package. It is out of date and will cause
conflicts among the NVIDIA driver and binaries.**
Go to [CUDA Toolkit 11.7
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
and use the target selection wizard to choose your operating system,
hardware platform, and preferred installation method (e.g. "local"
versus "network").
This will provide you with a downloadable install file or, depending
on your choices, a recipe for downloading and running an install shell
script. Be sure to read and follow the full installation instructions.
After an install that seems successful, you can confirm by again
running `nvidia-smi` from the command line.
### Linux Install with a Runtime Container
On Linux systems, an alternative to installing CUDA Toolkit directly on
your system is to run an NVIDIA software container that has the CUDA
libraries already in place. This is recommended if you are already
familiar with containerization technologies such as Docker.
For downloads and instructions, visit the [NVIDIA CUDA Container
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/cu117` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
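For example, a CUDA-enabled install of torch into the active virtual environment might look like this (the same index URL used by the troubleshooting recipes in this guide):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
```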
## :simple-amd: ROCm
### Linux Install
AMD GPUs are only supported on Linux platforms due to the lack of a
Windows ROCm driver at the current time. Also be aware that support
for newer AMD GPUs is spotty. Your mileage may vary.
It is possible that the ROCm driver is already installed on your
machine. To test, open up a terminal window and issue the following
command:
```
rocm-smi
```
If you get a table labeled "ROCm System Management Interface" the
driver is installed and you are done. If you get "command not found,"
then the driver needs to be installed.
Go to AMD's [ROCm Downloads
Guide](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation_new.html#installation-methods)
and scroll to the _Installation Methods_ section. Find the subsection
for the install method for your preferred Linux distribution, and
issue the commands given in the recipe.
Annoyingly, the official AMD site does not have a recipe for the most
recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.
After installation, please run `rocm-smi` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
do a reboot in order to load the driver.
### Linux Install with a ROCm-docker Container
If you are comfortable with the Docker containerization system, then
you can build a ROCm docker file. The source code and installation
recipes are available
[Here](https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md)
### Torch Installation
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
This will be done automatically for you if you use the installer
script.
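For reference, a manual ROCm install of torch would look like this (mirroring the troubleshooting recipe in the automated-install guide):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
```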
Be aware that the torch machine learning library does not seamlessly
interoperate with all AMD GPUs and you may experience garbled images,
black images, or long startup delays before rendering commences. Most
of these issues can be solved by Googling for workarounds. If you have
a problem and find a solution, please post an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) so that other
users benefit and we can update this document.

---
title: Installing with Docker
---
For general use, install locally to leverage your machine's GPU.
!!! tip "For running on a cloud instance/service"
Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
## Why containers?
They provide a flexible, reliable way to build and deploy InvokeAI.

Some suggestions of variables you may want to change besides the Token:
<figure markdown>
| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
| ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag |
| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository |
| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to built, available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. |
| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used |
| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development |
| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url |
</figure>
#### Build the Image
I provided a build script, which is located next to the Dockerfile in
`docker/build.sh`. It can be executed from the repository root like this:
```bash
./docker-build/build.sh
./docker/build.sh
```
The build script not only builds the container, but also creates the Docker
volume if it does not exist yet, or, if the volume is empty, downloads the models.
volume if it does not already exist.
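For example, you can override any of the variables from the table above in
your shell before invoking the build script (the token value below is a
placeholder, and `rocm` is one of the documented flavors):

```bash
# Hypothetical example: customize the build via environment variables
export HUGGING_FACE_HUB_TOKEN=hf_xxxxxxxxxxxx   # required; placeholder value
export CONTAINER_FLAVOR=rocm                    # build the ROCm flavor instead of cuda
./docker/build.sh
```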
#### Run the Container
After the build process is done, you can run the container via the provided
`docker-build/run.sh` script
`docker/run.sh` script
```bash
./docker-build/run.sh
./docker/run.sh
```
When used without arguments, the container will start the webserver and provide
@@ -119,7 +117,7 @@ also do so.
!!! example "run script example"
```bash
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
./docker/run.sh "banana sushi" -Ak_lms -S42 -s10
```
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
@@ -130,16 +128,18 @@ also do so.
## Running the container on your GPU
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
environment variable to enable GPU usage and have the process run much faster:
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running
the container with an extra environment variable to enable GPU usage and have
the process run much faster:
```bash
GPU_FLAGS=all ./docker-build/run.sh
GPU_FLAGS=all ./docker/run.sh
```
This passes the `--gpus all` flag to docker and uses the GPU.
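For reference, this is roughly equivalent to passing the flag to `docker run`
yourself; a simplified sketch (the real script also sets up the volume mount
and port mapping, and the image name is a placeholder):

```bash
docker run --gpus all --rm -it -p 9090:9090 <your-invokeai-image>
```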
If you don't have a GPU (or your host is not yet setup to use it) you will see a message like this:
If you don't have a GPU (or your host is not yet setup to use it) you will see a
message like this:
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
@@ -147,84 +147,8 @@ You can use the full set of GPU combinations documented here:
https://docs.docker.com/config/containers/resource_constraints/#gpu
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.
## Running InvokeAI in the cloud with Docker
We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
An advantage of this method is that it does not need any local setup or additional dependencies.
See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
### Prerequisites
- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essential` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
### Building and running the image locally
1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
- `make configure` (This does *not* require a GPU-capable system)
- this will create a local cache of models and configs (a.k.a the _runtime dir_)
- enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
To use InvokeAI on the CLI, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
#### Building and running without `make`
(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
!!! example "Build the image and configure the runtime directory"
```Shell
cd docker-build
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
```
!!! example "Run the web server"
```Shell
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
```
Access the Web UI at http://localhost:9090
!!! example "Run the InvokeAI interactive CLI"
```
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
```
### Running the image in the cloud
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (e.g. Docker Hub), as sketched below
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
1. use either one of the `docker run` commands above, substituting the image name for your own image.
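A minimal sketch of the tag/push/pull round trip (registry and image names are placeholders):

```Shell
# on your build machine
docker tag local/invokeai:latest your-registry/invokeai:latest
docker push your-registry/invokeai:latest
# on the cloud instance
docker pull your-registry/invokeai:latest
```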
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
The template's `README` provides ample detail, but at a high level, the process is as follows:
1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to
choose a specific device identified by a UUID.
---
@@ -240,13 +164,12 @@ Running on other cloud providers such as Vast.ai will likely work in a similar f
If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.
If you're **directly on macOS follow these startup instructions**.
With the Conda environment activated (`conda activate ldm`), run the interactive
If you're **directly on macOS follow these startup instructions**. With the
Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`:
Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.
`img2img`: Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work. By default the images are saved
in `outputs/img-samples/`.
```Shell
python3 scripts/invoke.py --full_precision
@@ -262,9 +185,9 @@ invoke> q
### Text to Image
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly.
Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.
image. This will let you know that everything is set up correctly. Then increase
steps to 100 or more for good (but slower) results. The prompt can be in quotes
or not.
```Shell
invoke> The hulk fighting with sheldon cooper -s5 -n1
@@ -277,10 +200,9 @@ You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.
If you're on a container the output is set to the Docker volume. You can copy it
wherever you want.
You can download it from the Docker Desktop app, Volumes, my-vol, data.
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
`*.png` so you'll need to specify the image file name.
wherever you want. You can download it from the Docker Desktop app, Volumes,
my-vol, data. Or you can copy it from your Mac terminal. Keep in mind
`docker cp` can't expand `*.png` so you'll need to specify the image file name.
On your host Mac (you can use the name of any container that mounted the
volume):
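For example (a sketch; substitute your container's name and the actual image file name):

```Shell
docker cp <container-name>:/data/<image-file-name>.png ~/Pictures/
```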

View File

@@ -4,249 +4,347 @@ title: Installing Models
# :octicons-paintbrush-16: Installing Models
## Model Weight Files
## Checkpoint and Diffusers Models
The model weight files ('\*.ckpt') are the Stable Diffusion "secret sauce". They
are the product of training the AI on millions of captioned images gathered from
multiple sources.
The model checkpoint files ('\*.ckpt') are the Stable Diffusion
"secret sauce". They are the product of training the AI on millions of
captioned images gathered from multiple sources.
Originally there was only a single Stable Diffusion weights file, which many
people named `model.ckpt`. Now there are dozens or more that have been "fine
tuned" to provide particulary styles, genres, or other features. InvokeAI allows
you to install and run multiple model weight files and switch between them
quickly in the command-line and web interfaces.
Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are dozens or more
that have been fine-tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format
which prevents malware from masquerading as a model, and `diffusers`
models, the most recent innovation.
This manual will guide you through installing and configuring model weight
files.
InvokeAI supports all three formats but strongly prefers the
`diffusers` format. These are distributed as directories containing
multiple subfolders, each of which contains a different aspect of the
model. The advantage of this is that the models load from disk very
quickly. Another advantage is that `diffusers` models are supported by a
large and active community of open source developers working at and with
the HuggingFace organization, and improvements in both rendering quality
and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.
While InvokeAI will continue to support `.ckpt` and `.safetensors`
models for the near future, these are deprecated and support will
likely be withdrawn at some point in the not-too-distant future.
This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
into performant `diffusers` models.
## Base Models
InvokeAI comes with support for a good initial set of models listed in the model
configuration file `configs/models.yaml`. They are:
InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that is currently installed is recorded in
`configs/models.yaml`. The current list is:
| Model | Weight File | Description | DOWNLOAD FROM |
| Model | HuggingFace Repo ID | Description | URL |
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | sd-v1-4.ckpt | Previous version of base Stable Diffusion model | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original |
| inpainting-1.5 | sd-v1-5-inpainting.ckpt | Stable Diffusion 1.5 model specialized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| waifu-diffusion-1.3 | model-epoch09-float32.ckpt | Stable Diffusion 1.4 trained to produce anime images | https://huggingface.co/hakurei/waifu-diffusion-v1-3 |
| `<all models>` | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tune file add-on file that improves face generation | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/ |
| stable-diffusion-1.5 | runwayml/stable-diffusion-v1-5 | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | CompVis/stable-diffusion-v1-4 | Previous version of base Stable Diffusion model | https://huggingface.co/CompVis/stable-diffusion-v1-4 |
| inpainting-1.5 | runwayml/stable-diffusion-inpainting | Stable Diffusion 1.5 optimized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| stable-diffusion-2.1-base |stabilityai/stable-diffusion-2-1-base | Stable Diffusion version 2.1 trained on 512 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1-base |
| stable-diffusion-2.1-768 |stabilityai/stable-diffusion-2-1 | Stable Diffusion version 2.1 trained on 768 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1 |
| dreamlike-diffusion-1.0 | dreamlike-art/dreamlike-diffusion-1.0 | An SD 1.5 model finetuned on high quality art | https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0 |
| dreamlike-photoreal-2.0 | dreamlike-art/dreamlike-photoreal-2.0 | A photorealistic model trained on 768 pixel images| https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
| openjourney-4.0 | prompthero/openjourney | An SD 1.5 model finetuned on Midjourney images; prompt with "mdjrny-v4 style" | https://huggingface.co/prompthero/openjourney |
| nitro-diffusion-1.0 | nitrosocke/Nitro-Diffusion | An SD 1.5 model finetuned on three styles; prompt with "archer style", "arcane style" or "modern disney style" | https://huggingface.co/nitrosocke/Nitro-Diffusion |
| trinart-2.0 | naclbit/trinart_stable_diffusion_v2 | An SD 1.5 model finetuned with ~40,000 assorted high-resolution manga/anime-style pictures | https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
| trinart-characters-2_0 | naclbit/trinart_derrida_characters_v2_stable_diffusion | An SD 1.5 model finetuned with 19.2M manga/anime-style pictures | https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion |
Note that these files are covered by an "Ethical AI" license which forbids
certain uses. You will need to create an account on the Hugging Face website and
accept the license terms before you can access the files.
The predefined configuration file for InvokeAI (located at
`configs/models.yaml`) provides entries for each of these weights files.
`stable-diffusion-1.5` is the default model used, and we strongly recommend that
you install this weights file if nothing else.
certain uses. When you initially download them, you are asked to
accept the license terms.
## Community-Contributed Models
There are too many to list here and more are being contributed every day.
Hugging Face maintains a
[fast-growing repository](https://huggingface.co/sd-concepts-library) of
fine-tune (".bin") models that can be imported into InvokeAI by passing the
`--embedding_path` option to the `invoke.py` command.
There are too many to list here and more are being contributed every
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
is a great resource for diffusers models, and is also the home of a
[fast-growing repository](https://huggingface.co/sd-concepts-library)
of embedding (".bin") models that add subjects and/or styles to your
images. The latter are automatically installed on the fly when you
include the text `<concept-name>` in your prompt. See [Concepts
Library](../features/CONCEPTS.md) for more information.
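For example, to apply a style concept from the library in a prompt (the concept name here is purely illustrative):

```bash
invoke> a portrait of a wizard in the style of <my-favorite-style>
```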
[This page](https://rentry.org/sdmodels) hosts a large list of official and
unofficial Stable Diffusion models and where they can be obtained.
Another popular site for community-contributed models is
[CIVITAI](https://civitai.com). This extensive site currently supports
only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.
## Installation
There are three ways to install weights files:
There are multiple ways to install and manage models:
1. During InvokeAI installation, the `configure_invokeai.py` script can download
them for you.
1. The `invokeai-configure` script which will download and install them for you.
2. You can use the command-line interface (CLI) to import, configure and modify
new model files.
2. The command-line tool (CLI) has commands that allow you to import, configure, and modify
model files.
3. You can download the files manually and add the appropriate entries to
`models.yaml`.
3. The web interface (WebUI) has a GUI for importing and managing
models.
### Installation via `configure_invokeai.py`
### Installation via `invokeai-configure`
This is the most automatic way. Run `scripts/configure_invokeai.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.
To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
directory:
!!! example ""
```text
Loading Python libraries...
** INTRODUCTION **
Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
and other large models that are needed for text to image generation. At any point you may interrupt
this program and resume later.
** WEIGHT SELECTION **
Would you like to download the Stable Diffusion model weights now? [y]
Choose the weight file(s) you wish to download. Before downloading you
will be given the option to view and change your selections.
[1] stable-diffusion-1.5:
The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
Download? [y]
[2] inpainting-1.5:
RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
Download? [y]
[3] stable-diffusion-1.4:
The original Stable Diffusion version 1.4 weight file (4.27 GB)
Download? [n] n
[4] waifu-diffusion-1.3:
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
Download? [n] y
[5] ft-mse-improved-autoencoder-840000:
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
Download? [y] y
The following weight files will be downloaded:
[1] stable-diffusion-1.5*
[2] inpainting-1.5
[4] waifu-diffusion-1.3
[5] ft-mse-improved-autoencoder-840000
*default
Ok to download? [y]
** LICENSE AGREEMENT FOR WEIGHT FILES **
1. To download the Stable Diffusion weight files you need to read and accept the
CreativeML Responsible AI license. If you have not already done so, please
create an account using the "Sign Up" button:
https://huggingface.co
You will need to verify your email address as part of the HuggingFace
registration process.
2. After creating the account, login under your account and accept
the license terms located here:
https://huggingface.co/CompVis/stable-diffusion-v-1-4-original
Press <enter> when you are ready to continue:
...
```
When the script is complete, you will find the downloaded weights files in
`models/ldm/stable-diffusion-v1` and a matching configuration file in
`configs/models.yaml`.
You can run the script again to add any models you didn't select the first time.
Note that as a safety measure the script will _never_ remove a
previously-installed weights file. You will have to do this manually.
From the `invoke` launcher, choose option (6) "re-run the configure
script to download new models." This will launch the same script that
prompted you to select models at install time. You can use this to add
models that you skipped the first time around. It is all right to
specify a model that was previously downloaded; the script will just
confirm that the files are complete.
### Installation via the CLI
You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.
1. First download the desired model weights file and place it under
`models/ldm/stable-diffusion-v1/`. You may rename the weights file to
something more memorable if you wish. Record the path of the weights file
(e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`)
#### Installing `.ckpt` and `.safetensors` models
2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.
If the model is already downloaded to your local disk, use
`!import_model /path/to/file.ckpt` to load it. For example:
3. At the `invoke>` command-line, enter the command
`!import_model <path to model>`. For example:
```bash
invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
```
`invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`
!!! tip "Forward Slashes"
On Windows systems, use forward slashes rather than backslashes
in your file paths.
If you do use backslashes,
you must double them like this:
`C:\\Users\\fred\\Downloads\\martians.safetensors`
!!! tip "the CLI supports file path autocompletion"
Alternatively you can directly import the file using its URL:
```bash
invoke> !import_model https://example.org/sd_models/martians.safetensors
```
For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.
When you import a legacy model, the CLI will ask you a few questions
about the model, including what size image it was trained on (usually
512x512), what name and description you wish to use for it, what
configuration file to use for it (usually the default
`v1-inference.yaml`), whether you'd like to make this model the
default at startup time, and whether you would like to install a
custom VAE (variable autoencoder) file for the model. For recent
models, the answer to the VAE question is usually "no," but it won't
hurt to answer "yes".
#### Installing `diffusers` models
You can install a `diffusers` model from the HuggingFace site using
`!import_model` and the HuggingFace repo_id for the model:
```bash
invoke> !import_model andite/anything-v4.0
```
Alternatively, you can download the model to disk and import it from
there. The model may be distributed as a ZIP file, or as a Git
repository:
```bash
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
```
!!! tip "The CLI supports file path autocompletion"
Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.
!!! tip "on Windows, you can drag model files onto the command-line"
!!! tip "On Windows, you can drag model files onto the command-line"
Once you have typed in `!import_model `, you can drag the
model file or directory onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste. However, you will need to reverse or
double backslashes as noted above.
Once you have typed in `!import_model `, you can drag the model `.ckpt` file
onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste.
Before installing, the CLI will ask you for a short name and
description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".
4. Follow the wizard's instructions to complete installation as shown in the
example here:
### Converting legacy models into `diffusers`
!!! example ""
The CLI command `!convert_model` will convert a `.safetensors` or `.ckpt`
model file into `diffusers` format and install it. This will enable the model
to load and run faster without loss of image quality.
```text
invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
>> Model import in process. Please enter the values needed to configure this model:
The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:
Name for this model: arabian-nights
Description of this model: Arabian Nights Fine Tune v1.0
Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
Default image width: 512
Default image height: 512
>> New configuration:
arabian-nights:
config: configs/stable-diffusion/v1-inference.yaml
description: Arabian Nights Fine Tune v1.0
height: 512
weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
width: 512
OK to import [n]? y
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
```
```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```
If you've previously installed the fine-tune VAE file
`vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you want to
add this VAE to the model.
After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.
The appropriate entry for this model will be added to `configs/models.yaml` and
it will be available to use in the CLI immediately.
### Optimizing a previously-installed model
The CLI has additional commands for switching among, viewing, editing, deleting
the available models. These are described in
[Command Line Client](../features/CLI.md#model-selection-and-importation), but
the two most frequently-used are `!models` and `!switch <name of model>`. The
first prints a table of models that InvokeAI knows about and their load status.
The second will load the requested model and lets you switch back and forth
quickly among loaded models.
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading the original file by using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:
```bash
invoke> !optimize_model martians-v1.0
```
The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.
### Related CLI Commands
There is a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include the following (a short
usage sketch appears after the list):
* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model
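For example, a quick session that lists your installed models and switches between two of them might look like this (model names are illustrative):

```bash
invoke> !models
invoke> !switch stable-diffusion-1.5
invoke> !switch waifu-diffusion-1.3
```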
### Manually editing `configs/models.yaml`
### Manually editing `configs/models.yaml`
If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.
First you need to download the desired .ckpt file and place it in
`models/ldm/stable-diffusion-v1` as described in step #1 in the previous
section. Record the path to the weights file, e.g.
`models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`
You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows one of the examples below:
Then using a **text** editor (e.g. the Windows Notepad application), open the
file `configs/models.yaml`, and add a new stanza that follows this model:
#### A legacy model
A legacy `.ckpt` or `.safetensors` entry will look like this:
```yaml
arabian-nights-1.0:
description: A great fine-tune in Arabian Nights style
weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
weights: ./path/to/arabian-nights-1.0.ckpt
config: ./configs/stable-diffusion/v1-inference.yaml
format: ckpt
width: 512
height: 512
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
default: false
```
| name | description |
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
#### A diffusers model
A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
repo_id: captahab/arabian-nights-1.1
format: diffusers
default: true
```
And for a downloaded directory:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
path: /path/to/captahab-arabian-nights-1.1
format: diffusers
default: true
```
There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
After you save the modified `models.yaml` file, relaunch
`invokeai`. The new model will now be available for your use.
### Installation via the WebUI
To access the WebUI Model Manager, click on the button that looks like
a cube in the upper right corner of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:
<figure markdown>
![model-manager](../assets/installing-models/webui-models-1.png)
</figure>
To add a new model, click on **+ Add New** and select either a
checkpoint/safetensors model or a diffusers model:
<figure markdown>
![model-manager-add-new](../assets/installing-models/webui-models-2.png)
</figure>
In this example, we chose **Add Diffusers**. As shown in the figure
below, a new dialogue prompts you to enter the name to use for the
model, its description, and either the location of the `diffusers`
model on disk, or its Repo ID on the HuggingFace web site. If you
choose to enter a path on disk, the system will autocomplete for you
as you type:
<figure markdown>
![model-manager-add-diffusers](../assets/installing-models/webui-models-3.png)
</figure>
Press **Add Model** at the bottom of the dialogue (scrolled out of
sight in the figure), and the model will be downloaded, imported, and
registered in `models.yaml`.
The **Add Checkpoint/Safetensor Model** option is similar, except that
in this case you can choose to scan an entire folder for
checkpoint/safetensors files to import. Simply type in the path of the
directory and press the "Search" icon. This will display the
`.ckpt` and `.safetensors` files found inside the directory and its
subfolders, and allow you to choose which ones to import:
<figure markdown>
![model-manager-add-checkpoint](../assets/installing-models/webui-models-4.png)
</figure>
## Model Management Startup Options
The `invoke` launcher and the `invokeai` script accept a series of
command-line arguments that modify InvokeAI's behavior when loading
models. These can be provided on the command line, or added to the
InvokeAI root directory's `invokeai.init` initialization file.
The arguments are:
* `--model <model name>` -- Start up with the indicated model loaded
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.
Here is an example of providing an argument on the command line using
the `invoke.sh` launch script:
```bash
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
```
And here is what the same argument looks like in `invokeai.init`:
```
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
Save the `models.yaml` and relaunch InvokeAI. The new model should now be
available for your use.

View File

@@ -2,114 +2,110 @@
title: Installing PyPatchMatch
---
# :octicons-paintbrush-16: Installing PyPatchMatch
# :material-image-size-select-large: Installing PyPatchMatch
pypatchmatch is a Python module for inpainting images. It is not
needed to run InvokeAI, but it greatly improves the quality of
inpainting and outpainting and is recommended.
pypatchmatch is a Python module for inpainting images. It is not needed to run
InvokeAI, but it greatly improves the quality of inpainting and outpainting and
is recommended.
Unfortunately, it is a C++ optimized module and installation
can be somewhat challenging. This guide leads you through the steps.
Unfortunately, it is a C++ optimized module and installation can be somewhat
challenging. This guide leads you through the steps.
## Windows
You're in luck! On Windows platforms PyPatchMatch will install
automatically on Windows systems with no extra intervention.
You're in luck! On Windows platforms PyPatchMatch will install automatically on
Windows systems with no extra intervention.
## Macintosh
PyPatchMatch is not currently supported, but the team is working on
it.
You need to have opencv installed so that pypatchmatch can be built:
```bash
brew install opencv
```
The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.
## Linux
Prior to installing PyPatchMatch, you need to take the following
steps:
Prior to installing PyPatchMatch, you need to take the following steps:
### Debian Based Distros
1. Install the `build-essential` tools:
```
sudo apt update
sudo apt install build-essential
```
```sh
sudo apt update
sudo apt install build-essential
```
2. Install `opencv`:
```
sudo apt install python3-opencv libopencv-dev
```
```sh
sudo apt install python3-opencv libopencv-dev
```
3. Fix the naming of the `opencv` package configuration file:
3. Activate the environment you use for invokeai, either with `conda` or with a
virtual environment.
```
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
ln -sf opencv4.pc opencv.pc
```
4. Install pypatchmatch:
4. Activate the environment you use for invokeai, either with
`conda` or with a virtual environment.
```sh
pip install pypatchmatch
```
5. Do a "develop" install of pypatchmatch:
```
pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
```
6. Confirm that pypatchmatch is installed.
At the command-line prompt enter `python`, and
then at the `>>>` line type `from patchmatch import patch_match`:
It should look like the following:
```
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from patchmatch import patch_match
Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
rm -rf build/obj libpatchmatch.so
mkdir: created directory 'build/obj'
mkdir: created directory 'build/obj/csrc/'
[dep] csrc/masked_image.cpp ...
[dep] csrc/nnf.cpp ...
[dep] csrc/inpaint.cpp ...
[dep] csrc/pyinterface.cpp ...
[CC] csrc/pyinterface.cpp ...
[CC] csrc/inpaint.cpp ...
[CC] csrc/nnf.cpp ...
[CC] csrc/masked_image.cpp ...
[link] libpatchmatch.so ...
```
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
`python`, and then at the `>>>` line type
`from patchmatch import patch_match`. It should look like the following:
```py
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from patchmatch import patch_match
Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
rm -rf build/obj libpatchmatch.so
mkdir: created directory 'build/obj'
mkdir: created directory 'build/obj/csrc/'
[dep] csrc/masked_image.cpp ...
[dep] csrc/nnf.cpp ...
[dep] csrc/inpaint.cpp ...
[dep] csrc/pyinterface.cpp ...
[CC] csrc/pyinterface.cpp ...
[CC] csrc/inpaint.cpp ...
[CC] csrc/nnf.cpp ...
[CC] csrc/masked_image.cpp ...
[link] libpatchmatch.so ...
```
### Arch Based Distros
1. Install the `base-devel` package:
```
sudo pacman -Syu
sudo pacman -S --needed base-devel
```
```sh
sudo pacman -Syu
sudo pacman -S --needed base-devel
```
2. Install `opencv`:
```
sudo pacman -S opencv
```
or for CUDA support
```
sudo pacman -S opencv-cuda
```
```sh
sudo pacman -S opencv
```
or for CUDA support
```sh
sudo pacman -S opencv-cuda
```
3. Fix the naming of the `opencv` package configuration file:
```
cd /usr/lib/pkgconfig/
ln -sf opencv4.pc opencv.pc
```
**Next, Follow Steps 4-6 from the Debian Section above**
```sh
cd /usr/lib/pkgconfig/
ln -sf opencv4.pc opencv.pc
```
[**Next, follow steps 4-6 from the Debian section above**](#linux)
If you see no errors, then you're ready to go!

View File

@@ -0,0 +1,206 @@
---
title: Installing xFormers
---
# :material-image-size-select-large: Installing xformers
xFormers is a toolbox that integrates with the PyTorch and CUDA
libraries to provide accelerated performance and reduced memory
consumption for applications using the transformer machine learning
architecture. After installing xFormers, InvokeAI users who have
CUDA GPUs will see a noticeable decrease in GPU memory consumption and
an increase in speed.
xFormers can be installed into a working InvokeAI installation without
any code changes or other updates. This document explains how to
install xFormers.
## Pip Install
For both Windows and Linux, you can install `xformers` in just a
couple of steps from the command line.
If you are used to launching `invoke.sh` or `invoke.bat` to start
InvokeAI, then run the launcher and select the "developer's console"
to get to the command line. If you run `invoke.py` directly from the
command line, just be sure to activate its virtual environment.
Then run the following three commands:
```sh
pip install xformers==0.0.16rc425
pip install triton
python -m xformers.info
```
The first command installs `xformers`, the second installs the
`triton` training accelerator, and the third prints out the `xformers`
installation status. If all goes well, you'll see a report like the
following:
```sh
xFormers 0.0.16rc425
memory_efficient_attention.cutlassF: available
memory_efficient_attention.cutlassB: available
memory_efficient_attention.flshattF: available
memory_efficient_attention.flshattB: available
memory_efficient_attention.smallkF: available
memory_efficient_attention.smallkB: available
memory_efficient_attention.tritonflashattF: available
memory_efficient_attention.tritonflashattB: available
swiglu.fused.p.cpp: available
is_triton_available: True
is_functorch_available: False
pytorch.version: 1.13.1+cu117
pytorch.cuda: available
gpu.compute_capability: 8.6
gpu.name: NVIDIA RTX A2000 12GB
build.info: available
build.cuda_version: 1107
build.python_version: 3.10.9
build.torch_version: 1.13.1+cu117
build.env.TORCH_CUDA_ARCH_LIST: 5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6
build.env.XFORMERS_BUILD_TYPE: Release
build.env.XFORMERS_ENABLE_DEBUG_ASSERTIONS: None
build.env.NVCC_FLAGS: None
build.env.XFORMERS_PACKAGE_FROM: wheel-v0.0.16rc425
source.privacy: open source
```
## Source Builds
`xformers` is currently under active development and at some point you
may wish to build it from source to get the latest features and
bugfixes.
### Source Build on Linux
Note that xFormers only works with true NVIDIA GPUs and will not work
properly with the ROCm driver for AMD acceleration.
xFormers is not currently available as a pip binary wheel and must be
installed from source. These instructions were written for a system
running Ubuntu 22.04, but other Linux distributions should be able to
adapt this recipe.
#### 1. Install CUDA Toolkit 11.7
You will need the CUDA developer's toolkit in order to compile and
install xFormers. **Do not try to install Ubuntu's nvidia-cuda-toolkit
package.** It is out of date and will cause conflicts among the NVIDIA
driver and binaries. Instead install the CUDA Toolkit package provided
by NVIDIA itself. Go to [CUDA Toolkit 11.7
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive)
and use the target selection wizard to choose your platform and Linux
distribution. Select an installer type of "runfile (local)" at the
last step.
This will provide you with a recipe for downloading and running an
install shell script that will install the toolkit and drivers. For
example, the install script recipe for Ubuntu 22.04 running on a
x86_64 system is:
```
wget https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_515.43.04_linux.run
sudo sh cuda_11.7.0_515.43.04_linux.run
```
Rather than cut-and-paste this example, we recommend that you walk
through the toolkit wizard in order to get the most up-to-date
installer for your system.
#### 2. Confirm/Install pyTorch 1.13 with CUDA 11.7 support
If you are using InvokeAI 2.3 or higher, these will already be
installed. If not, you can check whether you have the needed libraries
using a quick command. Activate the invokeai virtual environment,
either by entering the "developer's console", or manually with a
command similar to `source ~/invokeai/.venv/bin/activate` (depending
on where your `invokeai` directory is).
Then run the command:
```sh
python -c 'exec("import torch\nprint(torch.__version__)")'
```
If it prints __1.13.1+cu117__ you're good. If not, you can install the
most up to date libraries with this command:
```sh
pip install --upgrade --force-reinstall torch torchvision
```
#### 3. Install the triton module
This module isn't necessary for xFormers image inference optimization,
but avoids a startup warning.
```sh
pip install triton
```
#### 4. Install source code build prerequisites
To build xFormers from source, you will need the `build-essential`
package. If you don't have it installed already, run:
```sh
sudo apt install build-essential
```
#### 5. Build xFormers
There is no pip wheel package for xFormers at this time (January
2023). Although there is a conda package, InvokeAI no longer
officially supports conda installations and you're on your own if you
wish to try this route.
Following the recipe provided at the [xFormers GitHub
page](https://github.com/facebookresearch/xformers), and with the
InvokeAI virtual environment active (see step 2), run the following
commands:
```sh
pip install ninja
export TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.2;7.5;8.0;8.6"
pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers
```
`TORCH_CUDA_ARCH_LIST` is a list of GPU architectures to compile
xFormers support for. You can speed up compilation by selecting only
the architecture specific to your system. You'll find the list of
GPUs and their architectures at NVIDIA's [GPU Compute
Capability](https://developer.nvidia.com/cuda-gpus) table.
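For example, if you know your card's compute capability (8.6 for the NVIDIA RTX A2000 shown in the report above), you can restrict the build to just that architecture; a sketch:

```sh
export TORCH_CUDA_ARCH_LIST="8.6"
pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers
```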
If the compile and install completes successfully, you can check that
xFormers is installed with this command:
```sh
python -m xformers.info
```
If successful, the top of the listing should indicate "available" for
each of the `memory_efficient_attention` modules, as shown here:
```sh
memory_efficient_attention.cutlassF: available
memory_efficient_attention.cutlassB: available
memory_efficient_attention.flshattF: available
memory_efficient_attention.flshattB: available
memory_efficient_attention.smallkF: available
memory_efficient_attention.smallkB: available
memory_efficient_attention.tritonflashattF: available
memory_efficient_attention.tritonflashattB: available
[...]
```
You can now launch InvokeAI and enjoy the benefits of xFormers.
### Windows
To come
---
(c) Copyright 2023 Lincoln Stein and the InvokeAI Development Team

View File

@@ -1 +0,0 @@
010_INSTALL_AUTOMATED.md

View File

@@ -1,429 +0,0 @@
---
title: Manual Installation
---
<figure markdown>
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
</figure>
!!! warning "This is for advanced Users"
who are already experienced with using conda or pip
## Introduction
You have two choices for manual installation, the [first one](#Conda_method)
based on the Anaconda3 package manager (`conda`), and
[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
commands and the PIP package manager. Both methods require you to enter commands
on the terminal, also known as the "console".
On Windows systems you are encouraged to install and use the
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.
### Conda method
1. Check that your system meets the
[hardware requirements](index.md#Hardware_Requirements) and has the
appropriate GPU drivers installed. In particular, if you are a Linux user
with an AMD GPU installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
of ROCm driver support on this platform.
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
information about the installed video card.
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
can skip this step.
2. You will need to install Anaconda3 and Git if they are not already
available. Use your operating system's preferred package manager, or
download the installers manually. You can find them here:
- [Anaconda3](https://www.anaconda.com/)
- [git](https://git-scm.com/downloads)
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
GitHub:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create an InvokeAI folder where you will follow the rest of the
steps.
4. Enter the newly-created InvokeAI folder:
```bash
cd InvokeAI
```
From this step forward make sure that you are working in the InvokeAI
directory!
5. Select the appropriate environment file:
We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:
<figure markdown>
| filename | OS |
| :----------------------: | :----------------------------: |
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
| environment-mac.yml | Macintosh |
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
</figure>
Choose the appropriate environment file for your system and link or copy it
to `environment.yml` in InvokeAI's top-level directory. To do so, run the
following command from the repository root:
!!! Example ""
=== "Macintosh and Linux"
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
```bash
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
```
When this is done, confirm that a file `environment.yml` has been linked in
the InvokeAI root directory and that it points to the correct file in the
`environments-and-requirements`.
```bash
ls -la
```
=== "Windows"
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
```cmd
copy environments-and-requirements\environment-win-cuda.yml environment.yml
```
Afterwards verify that the file `environment.yml` has been created, either via the
explorer or by using the command `dir` from the terminal
```cmd
dir
```
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
6. Create the conda environment:
```bash
conda env update
```
This will create a new environment named `invokeai` and install all InvokeAI
dependencies into it. If something goes wrong you should take a look at
[troubleshooting](#troubleshooting).
7. Activate the `invokeai` environment:
In order to use the newly created environment you will first need to
activate it
```bash
conda activate invokeai
```
Your command-line prompt should change to indicate that `invokeai` is active
by prepending `(invokeai)`.
8. Pre-Load the model weights files:
!!! tip
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [here](INSTALLING_MODELS.md).
```bash
python scripts/configure_invokeai.py
```
The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need
to take to create an account on the site that hosts the weights files,
accept the agreement, and provide an access token that allows InvokeAI to
legally download and install the weights files.
If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.
9. Run the command-line- or the web- interface:
!!! example ""
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
=== "CLI"
```bash
python scripts/invoke.py
```
=== "local Webserver"
```bash
python scripts/invoke.py --web
```
=== "Public Webserver"
```bash
python scripts/invoke.py --web --host 0.0.0.0
```
If you choose to run the web interface, point your browser at
http://localhost:9090 in order to load the GUI.
10. Render away!
Browse the [features](../features/CLI.md) section to learn about all the things you
can do with InvokeAI.
Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm up period
rendering will be fast.
11. Subsequently, to relaunch the script, be sure to run "conda activate
invokeai", enter the `InvokeAI` directory, and then launch the invoke
script. If you forget to activate the 'invokeai' environment, the script
will fail with multiple `ModuleNotFound` errors.
## Updating to newer versions of the script
This distribution is changing rapidly. If you used the `git clone` method
(step 5) to download the InvokeAI directory, then to update to the latest and
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive #optional
```
This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or released models. The
`--no-interactive` flag will prevent the script from prompting you to download
the big Stable Diffusion weights files.
## pip Install
To install InvokeAI with only the PIP package manager, please follow these
steps:
1. Make sure you are using Python 3.9 or higher. The rest of the install
procedure depends on this:
```bash
python -V
```
2. Install the `virtualenv` tool if you don't have it already:
```bash
pip install virtualenv
```
3. From within the InvokeAI top-level directory, create and activate a virtual
environment named `invokeai`:
```bash
virtualenv invokeai
source invokeai/bin/activate
```
4. Pick the correct `requirements*.txt` file for your hardware and operating
system.
We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:
<figure markdown>
| filename | OS |
| :---------------------------------: | :-------------------------------------------------------------: |
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
| requirements-lin-arm64.txt | Linux running on arm64 systems |
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |
</figure>
Select the appropriate requirements file, and make a link to it from
`requirements.txt` in the top-level InvokeAI directory. The command to do
this from the top-level directory is:
!!! example ""
=== "Macintosh and Linux"
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
```bash
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
```
=== "Windows"
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
```cmd
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
```
!!! warning
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
This is a base requirements file that does not have the platform-specific
libraries. Also, be sure to link or copy the platform-specific file to
a top-level file named `requirements.txt` as shown here. Running pip on
a requirements file in a subdirectory will not work as expected.
When this is done, confirm that a file named `requirements.txt` has been
created in the InvokeAI root directory and that it points to the correct
file in `environments-and-requirements`.
5. Run PIP
Be sure that the `invokeai` environment is active before doing this:
```bash
pip install --prefer-binary -r requirements.txt
```
---
## Troubleshooting
Here are some common issues and their suggested solutions.
### Conda
#### Conda fails before completing `conda update`
The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.
We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:
```bash
pip install test-tube==0.7.5
```
You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.
### Create Conda Environment fails on MacOS
If `conda env create` fails with an lmdb error, this is most likely caused by Clang.
Run `brew config` to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
Start by installing the additional Xcode command line tools, followed by `brew install llvm`.
```bash
xcode-select --install
brew install llvm
```
If `brew config` shows that Clang is installed, update to the latest llvm and try creating the environment again.
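A minimal recovery sketch, assuming the conda route with the macOS environment file linked to a top-level `environment.yml`:
```bash
brew upgrade llvm
conda env create -f environment.yml
```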
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda env update` again.
If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:
```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda env update
```
This removes all cached library files, including any that may have become
corrupted. (This is not supposed to happen, but it does anyway.)
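Once the environment has been rebuilt, reactivate it and confirm the CLI starts; a quick smoke test, assuming the standard `scripts/invoke.py` entry point, is:
```bash
conda activate invokeai
python scripts/invoke.py --help
```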
#### `invoke.py` crashes at a later stage
If the CLI or web interface was working, but something unexpected happens
later in the session, you have probably encountered a code bug that is
unrelated to any install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).
#### My renders are running very slowly
You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD (ROCm) systems it will also say "cuda",
and on Macintoshes it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library.
You may be able to fix this by installing a different torch library. Here are
the magic incantations for Conda and PIP.
!!! todo "For CUDA systems"
- conda
```bash
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
```
- pip
```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
```
!!! todo "For AMD systems"
- conda
```bash
conda activate invokeai
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```
- pip
```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```
More information and troubleshooting tips can be found at https://pytorch.org.

View File

@@ -3,7 +3,19 @@ title: Overview
---
We offer several ways to install InvokeAI, each one suited to your
experience and preferences.
experience and preferences. We suggest that everyone start by
reviewing the
[hardware](010_INSTALL_AUTOMATED.md#hardware_requirements) and
[software](010_INSTALL_AUTOMATED.md#software_requirements)
requirements, as they are the same across each install method. Then
pick the install method most suitable to your level of experience and
needs.
See the [troubleshooting
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
install guide for frequently-encountered installation issues.
## Main Application
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
@@ -19,6 +31,8 @@ experience and preferences.
those who prefer the `conda` tool, and one suited to those who prefer
`pip` and Python virtual environments. In our hands the pip install
is faster and more reliable, but your mileage may vary.
Note that the conda installation method is deprecated and will no
longer be supported at some point in the future.
This method is recommended for users who have previously used `conda`
or `pip`, developers, and anyone who wishes to remain on
@@ -31,3 +45,10 @@ experience and preferences.
InvokeAI and its dependencies. This method is recommended for
individuals who have experience with Docker containers and understand
the pluses and minuses of a container-based install.
## Quick Guides
* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
* [Installing XFormers](./070_INSTALL_XFORMERS.md)
* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
* [Installing New Models](./050_INSTALLING_MODELS.md)

View File

@@ -23,9 +23,11 @@ We thank them for all of their time and hard work.
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Model training and merging
## **Contributions by**

View File

@@ -1,45 +0,0 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- albumentations=0.4.3
- cudatoolkit
- einops=0.3.0
- eventlet
- flask-socketio=5.3.0
- flask=2.1.*
- flask_cors=3.0.10
- imageio-ffmpeg=0.4.2
- imageio=2.9.0
- kornia=0.6
- numpy=1.19
- opencv=4.6.0
- pillow=8.*
- pip>=22.2.2
- pudb=2019.2
- python=3.9.*
- pytorch
- pytorch-lightning=1.7.7
- send2trash=1.8.0
- streamlit
- tokenizers>=0.11.1,!=0.11.3,<0.13
- torch-fidelity=0.3.0
- torchmetrics=0.7.0
- torchvision
- transformers=4.21.3
- pip:
- getpass_asterisk
- omegaconf==2.1.1
- picklescan
- pyreadline3
- realesrgan
- taming-transformers-rom1504
- test-tube>=0.7.5
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .

View File

@@ -1,47 +0,0 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python=3.9.*
- pip=22.2.2
- numpy=1.23.3
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- albumentations==0.4.3
- diffusers==0.6.0
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- picklescan
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- tqdm
- torch
- torch-fidelity==0.3.0
- torchaudio
- torchmetrics==0.7.0
- torchvision
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .

View File

@@ -1,46 +0,0 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python=3.9.*
- pip=22.2.2
- numpy=1.23.3
- torchvision=0.13.1
- torchaudio=0.12.1
- pytorch=1.12.1
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- diffusers==0.6.0
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- picklescan
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- torch-fidelity==0.3.0
- torchmetrics==0.7.0
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .

View File

@@ -1,65 +0,0 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python=3.10
- pip>=22.2
- pytorch=1.12
- pytorch-lightning=1.7
- torchvision=0.13
- torchmetrics=0.10
- torch-fidelity=0.3
# I suggest keeping the other deps sorted for convenience.
# To determine what the latest versions should be, run:
#
# ```shell
# sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
# CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
# ```
- albumentations=1.2
- coloredlogs=15.0
- diffusers=0.6
- einops=0.3
- eventlet
- grpcio=1.46
- flask=2.1
- flask-socketio=5.3
- flask-cors=3.0
- humanfriendly=10.0
- imageio=2.21
- imageio-ffmpeg=0.4
- imgaug=0.4
- kornia=0.6
- mpmath=1.2
- nomkl=3
- numpy=1.23
- omegaconf=2.1
- openh264=2.3
- onnx=1.12
- onnxruntime=1.12
- pudb=2019.2
- protobuf=3.20
- py-opencv=4.6
- scipy=1.9
- streamlit=1.12
- sympy=1.10
- send2trash=1.8
- tensorboard=2.10
- transformers=4.23
- pip:
- getpass_asterisk
- picklescan
- taming-transformers-rom1504
- test-tube==0.7.5
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1

View File

@@ -1,46 +0,0 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python=3.10.*
- pip=22.2.2
- numpy=1.23.3
- torchvision=0.13.1
- torchaudio=0.12.1
- pytorch=1.12.1
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- diffusers==0.6.0
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- picklescan
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- torch-fidelity==0.3.0
- torchmetrics==0.7.0
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .

View File

@@ -1,39 +0,0 @@
# pip will resolve the version which matches torch
albumentations
diffusers==0.10.*
einops
eventlet
facexlib
flask==2.1.3
flask_cors==3.0.10
flask_socketio==5.3.0
flaskwebgui==1.0.3
getpass_asterisk
gfpgan==1.3.8
huggingface-hub
imageio
imageio-ffmpeg
kornia
numpy==1.23.*
omegaconf
opencv-python
picklescan
pillow
pip>=22
pudb
pyreadline3
pytorch-lightning==1.7.7
realesrgan
requests==2.25.1
scikit-image>=0.19
send2trash
streamlit
taming-transformers-rom1504
test-tube>=0.7.5
torch-fidelity
torchmetrics
transformers==4.25.*
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg

View File

@@ -1,6 +0,0 @@
-r environments-and-requirements/requirements-base.txt
# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
torch
torchvision
-e .

View File

@@ -1,3 +0,0 @@
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
-r environments-and-requirements/requirements-base.txt
-e .

View File

@@ -1,5 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
-r environments-and-requirements/requirements-base.txt
torch
torchvision
-e .

View File

@@ -1,6 +0,0 @@
-r environments-and-requirements/requirements-base.txt
grpcio<1.51.0
protobuf==3.19.6
torch<1.13.0
torchvision<0.14.0
-e .

View File

@@ -1,6 +0,0 @@
-r environments-and-requirements/requirements-base.txt
# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
torch==1.12.1
torchvision==0.13.1
-e .

View File

@@ -1,13 +0,0 @@
module.exports = {
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'plugin:react-hooks/recommended',
],
parser: '@typescript-eslint/parser',
plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
root: true,
rules: {
'@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
},
};

View File

@@ -1,28 +0,0 @@
# Stable Diffusion Web UI
## Run
- `python scripts/dream.py --web` serves both frontend and backend at
http://localhost:9090
## Environment
Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
[yarn](https://yarnpkg.com/getting-started/install).
From `frontend/` run `npm install` / `yarn install` to install the frontend
packages.
## Dev
1. From `frontend/`, run `npm run dev` / `yarn dev` to start the dev server.
2. Run `python scripts/dream.py --web`.
3. Navigate to the dev server address e.g. `http://localhost:5173/`.
To build for dev: `npm run build-dev` / `yarn build-dev`
To build for production: `npm run build` / `yarn build`
## TODO
- Search repo for "TODO"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,23 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.ec2d89c6.js"></script>
<link rel="stylesheet" href="./assets/index.0dadf5d0.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>
<body>
<div id="root"></div>
<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-5c5a479d.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1,23 +0,0 @@
{
"eslintConfig": {
"extends": [
"eslint:recommended",
"plugin:@typescript-eslint/recommended",
"plugin:react-hooks/recommended"
],
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint", "eslint-plugin-react-hooks"],
"root": true,
"settings": {
"import/resolver": {
"node": {
"paths": ["src"],
"extensions": [".js", ".jsx", ".ts", ".tsx"]
}
}
},
"rules": {
"react/jsx-filename-extension": [1, { "extensions": [".tsx", ".ts"] }]
}
}
}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1,66 +0,0 @@
import React, { ChangeEvent } from 'react';
import { useAppDispatch, useAppSelector } from '../../../../../app/storeHooks';
import _ from 'lodash';
import { createSelector } from '@reduxjs/toolkit';
import IAISwitch from '../../../../../common/components/IAISwitch';
import IAISlider from '../../../../../common/components/IAISlider';
import { Flex } from '@chakra-ui/react';
import {
setInpaintReplace,
setShouldUseInpaintReplace,
} from 'features/canvas/store/canvasSlice';
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
import { useTranslation } from 'react-i18next';
const selector = createSelector(
canvasSelector,
(canvas) => {
const { inpaintReplace, shouldUseInpaintReplace } = canvas;
return {
inpaintReplace,
shouldUseInpaintReplace,
};
},
{
memoizeOptions: {
resultEqualityCheck: _.isEqual,
},
}
);
export default function InpaintReplace() {
const { inpaintReplace, shouldUseInpaintReplace } = useAppSelector(selector);
const dispatch = useAppDispatch();
const { t } = useTranslation();
return (
<Flex alignItems={'center'} columnGap={'1rem'}>
<IAISlider
label={t('options:inpaintReplace')}
value={inpaintReplace}
onChange={(v: number) => {
dispatch(setInpaintReplace(v));
}}
min={0}
max={1.0}
step={0.05}
isInteger={false}
isSliderDisabled={!shouldUseInpaintReplace}
withSliderMarks
sliderMarkRightOffset={-2}
withReset
handleReset={() => dispatch(setInpaintReplace(1))}
isResetDisabled={!shouldUseInpaintReplace}
/>
<IAISwitch
isChecked={shouldUseInpaintReplace}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(setShouldUseInpaintReplace(e.target.checked))
}
marginTop="1.25rem"
/>
</Flex>
);
}

View File

@@ -1,113 +0,0 @@
import { Flex } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISlider from 'common/components/IAISlider';
import { optionsSelector } from 'features/options/store/optionsSelectors';
import {
setSeamBlur,
setSeamSize,
setSeamSteps,
setSeamStrength,
} from 'features/options/store/optionsSlice';
import _ from 'lodash';
import { useTranslation } from 'react-i18next';
const selector = createSelector(
[optionsSelector],
(options) => {
const { seamSize, seamBlur, seamStrength, seamSteps } = options;
return {
seamSize,
seamBlur,
seamStrength,
seamSteps,
};
},
{
memoizeOptions: {
resultEqualityCheck: _.isEqual,
},
}
);
const SeamCorrectionOptions = () => {
const dispatch = useAppDispatch();
const { seamSize, seamBlur, seamStrength, seamSteps } =
useAppSelector(selector);
const { t } = useTranslation();
return (
<Flex direction="column" gap="1rem">
<IAISlider
sliderMarkRightOffset={-6}
label={t('options:seamSize')}
min={1}
max={256}
sliderNumberInputProps={{ max: 512 }}
value={seamSize}
onChange={(v) => {
dispatch(setSeamSize(v));
}}
handleReset={() => dispatch(setSeamSize(96))}
withInput
withSliderMarks
withReset
/>
<IAISlider
sliderMarkRightOffset={-4}
label={t('options:seamBlur')}
min={0}
max={64}
sliderNumberInputProps={{ max: 512 }}
value={seamBlur}
onChange={(v) => {
dispatch(setSeamBlur(v));
}}
handleReset={() => {
dispatch(setSeamBlur(16));
}}
withInput
withSliderMarks
withReset
/>
<IAISlider
sliderMarkRightOffset={-7}
label={t('options:seamStrength')}
min={0.01}
max={0.99}
step={0.01}
value={seamStrength}
onChange={(v) => {
dispatch(setSeamStrength(v));
}}
handleReset={() => {
dispatch(setSeamStrength(0.7));
}}
withInput
withSliderMarks
withReset
/>
<IAISlider
sliderMarkRightOffset={-4}
label={t('options:seamSteps')}
min={1}
max={32}
sliderNumberInputProps={{ max: 100 }}
value={seamSteps}
onChange={(v) => {
dispatch(setSeamSteps(v));
}}
handleReset={() => {
dispatch(setSeamSteps(10));
}}
withInput
withSliderMarks
withReset
/>
</Flex>
);
};
export default SeamCorrectionOptions;

View File

@@ -1,34 +0,0 @@
import { Flex } from '@chakra-ui/react';
import { ChangeEvent } from 'react';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setHiresFix } from 'features/options/store/optionsSlice';
import { useTranslation } from 'react-i18next';
/**
* Hires Fix Toggle
*/
const HiresOptions = () => {
const dispatch = useAppDispatch();
const hiresFix = useAppSelector((state: RootState) => state.options.hiresFix);
const { t } = useTranslation();
const handleChangeHiresFix = (e: ChangeEvent<HTMLInputElement>) =>
dispatch(setHiresFix(e.target.checked));
return (
<Flex gap={2} direction={'column'}>
<IAISwitch
label={t('options:hiresOptim')}
fontSize={'md'}
isChecked={hiresFix}
onChange={handleChangeHiresFix}
/>
</Flex>
);
};
export default HiresOptions;

View File

@@ -1,12 +0,0 @@
import { Flex } from '@chakra-ui/react';
import SeamlessOptions from './SeamlessOptions';
const ImageToImageOutputOptions = () => {
return (
<Flex gap={2} direction={'column'}>
<SeamlessOptions />
</Flex>
);
};
export default ImageToImageOutputOptions;

Some files were not shown because too many files have changed in this diff.