From 318eb35ea0d3f9aa30762b4dfa2368ae1ebbb0f3 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 15:29:04 -0400 Subject: [PATCH 01/30] run make_schedule() if it hasn't already been called - fixes #984 --- ldm/models/diffusion/ksampler.py | 8 ++++++++ ldm/models/diffusion/sampler.py | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py index a947296f9e..1e44fe1fdf 100644 --- a/ldm/models/diffusion/ksampler.py +++ b/ldm/models/diffusion/ksampler.py @@ -171,6 +171,14 @@ class KSampler(Sampler): if img_callback is not None: img_callback(k_callback_values['x'],k_callback_values['i']) + # if make_schedule() hasn't been called, we do it now + if self.sigmas is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + # sigmas are set up in make_schedule - we take the last steps items total_steps = len(self.sigmas) sigmas = self.sigmas[-S-1:] diff --git a/ldm/models/diffusion/sampler.py b/ldm/models/diffusion/sampler.py index 8b19f894e5..88cdc01974 100644 --- a/ldm/models/diffusion/sampler.py +++ b/ldm/models/diffusion/sampler.py @@ -20,6 +20,7 @@ from ldm.modules.diffusionmodules.util import ( class Sampler(object): def __init__(self, model, schedule='linear', steps=None, device=None, **kwargs): self.model = model + self.ddim_timesteps = None self.ddpm_num_timesteps = steps self.schedule = schedule self.device = device or choose_torch_device() @@ -157,6 +158,14 @@ class Sampler(object): **kwargs, ): + # check to see if make_schedule() has run, and if not, run it + if self.ddim_timesteps is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + ts = self.get_timesteps(S) # sampling From 745c020aa27e79177ecff4f23f0c02ca874dc115 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 15:33:56 -0400 Subject: [PATCH 02/30] fix environment-mac.yml as per #964 --- docs/installation/INSTALL_MAC.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index 60ab3dc40d..282f6e87c3 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -92,10 +92,10 @@ ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt # BEGIN ARCHITECTURE-DEPENDENT STEP # # For M1: Create the environment & install packages -PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml # For Intel: Create the environment & install packages -PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yaml +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yml # END ARCHITECTURE-DEPENDENT STEP # # Activate the environment (you need to do this every time you want to run SD) @@ -347,7 +347,7 @@ python scripts/preload_models.py ``` The InvokeAI version includes this fix in -[environment-mac.yaml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yaml). +[environment-mac.yml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yml). 
### "Could not build wheels for tokenizers" From 773a64d4c0679062a452d42be00acf21d39dd5fa Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 15:37:45 -0400 Subject: [PATCH 03/30] fix references from lstein/stable-diffusion to invoke-ai/InvokeAI - as per #989 --- docs/index.md | 32 ++++++++++---------- notebooks/Stable_Diffusion_AI_Notebook.ipynb | 8 ++--- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/index.md b/docs/index.md index bd04545904..dc3bc35cec 100644 --- a/docs/index.md +++ b/docs/index.md @@ -25,24 +25,24 @@ template: main.html [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] -[CI checks on dev badge]: https://flat.badgen.net/github/checks/lstein/stable-diffusion/development?label=CI%20status%20on%20dev&cache=900&icon=github -[CI checks on dev link]: https://github.com/lstein/stable-diffusion/actions?query=branch%3Adevelopment -[CI checks on main badge]: https://flat.badgen.net/github/checks/lstein/stable-diffusion/main?label=CI%20status%20on%20main&cache=900&icon=github -[CI checks on main link]: https://github.com/lstein/stable-diffusion/actions/workflows/test-invoke-conda.yml +[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github +[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment +[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github +[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml [discord badge]: https://flat.badgen.net/discord/members/htRgbc7e?icon=discord [discord link]: https://discord.com/invite/htRgbc7e -[github forks badge]: https://flat.badgen.net/github/forks/lstein/stable-diffusion?icon=github +[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github [github forks link]: https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion -[github open issues badge]: https://flat.badgen.net/github/open-issues/lstein/stable-diffusion?icon=github -[github open issues link]: https://github.com/lstein/stable-diffusion/issues?q=is%3Aissue+is%3Aopen -[github open prs badge]: https://flat.badgen.net/github/open-prs/lstein/stable-diffusion?icon=github -[github open prs link]: https://github.com/lstein/stable-diffusion/pulls?q=is%3Apr+is%3Aopen -[github stars badge]: https://flat.badgen.net/github/stars/lstein/stable-diffusion?icon=github -[github stars link]: https://github.com/lstein/stable-diffusion/stargazers -[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/lstein/stable-diffusion/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900 -[latest commit to dev link]: https://github.com/lstein/stable-diffusion/commits/development -[latest release badge]: https://flat.badgen.net/github/release/lstein/stable-diffusion/development?icon=github -[latest release link]: https://github.com/lstein/stable-diffusion/releases +[github open issues badge]: https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github +[github open issues link]: https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen +[github open prs badge]: https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github +[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen +[github stars badge]: 
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github +[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers +[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900 +[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development +[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github +[latest release link]: https://github.com/invoke-ai/InvokeAI/releases @@ -54,7 +54,7 @@ GPU cards with as little as 4 GB or RAM. !!! note This fork is rapidly evolving. Please use the - [Issues](https://github.com/lstein/stable-diffusion/issues) tab to report bugs and make feature + [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature requests. Be sure to use the provided templates. They will help aid diagnose issues faster. ## :octicons-package-dependencies-24: Installation diff --git a/notebooks/Stable_Diffusion_AI_Notebook.ipynb b/notebooks/Stable_Diffusion_AI_Notebook.ipynb index 129323bb15..6766727c18 100644 --- a/notebooks/Stable_Diffusion_AI_Notebook.ipynb +++ b/notebooks/Stable_Diffusion_AI_Notebook.ipynb @@ -18,7 +18,7 @@ "---\n", "Note: It takes some time to load, but after installing all dependencies you can use the bot all time you want while colab instance is up.
\n", "Requirements: For this notebook to work you need to have [Stable-Diffusion-v-1-4](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) stored in your Google Drive, it will be needed in cell #7\n", - "##### For more details visit Github repository: [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion)\n", + "##### For more details visit Github repository: [invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI)\n", "---\n" ] }, @@ -57,7 +57,7 @@ "#@title 2. Download stable-diffusion Repository\n", "from os.path import exists\n", "\n", - "!git clone --quiet https://github.com/lstein/stable-diffusion.git # Original repo\n", + "!git clone --quiet https://github.com/invoke-ai/InvokeAI.git # Original repo\n", "%cd /content/stable-diffusion/\n", "!git checkout --quiet tags/release-1.14.1" ] @@ -74,8 +74,8 @@ "#@title 3. Install dependencies\n", "import gc\n", "\n", - "!wget https://raw.githubusercontent.com/lstein/stable-diffusion/development/requirements.txt\n", - "!wget https://raw.githubusercontent.com/lstein/stable-diffusion/development/requirements-lin-win-colab-CUDA.txt\n", + "!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/requirements.txt\n", + "!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/requirements-lin-win-colab-CUDA.txt\n", "!pip install colab-xterm\n", "!pip install -r requirements-lin-win-colab-CUDA.txt\n", "!pip install clean-fid torchtext\n", From 6e9d996ece8b503bdf96fc4e9648a8180ea27665 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 16:36:00 -0400 Subject: [PATCH 04/30] add short list of 2.0.0 new features --- README.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9606d30b9c..2d981ff226 100644 --- a/README.md +++ b/README.md @@ -136,8 +136,27 @@ you can try starting `invoke.py` with the `--precision=float32` flag: ### Latest Changes -- vNEXT (TODO 2022) +- v2.0.0 (9 October 2022) + - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains + for backward compatibility. + - Completely new WebGUI - launch with `python3 scripts/invoke.py --web` + - Support for inpainting and outpainting + - img2img runs on all k* samplers + - Support for CodeFormer face reconstruction + - Support for Textual Inversion on Macintoshes + - Support in both WebGUI and CLI for post-processing of previously-generated images + using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas), + and "embiggen" upscaling. See the `!fix` command. + - New `--hires` option on `invoke>` line allows larger images to be created without + duplicating elements, at the cost of some performance. + - Extensive metadata now written into PNG files, allowing reliable regeneration of images + and tweaking of previous settings. + - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. + - Improved command-line completion behavior and new commands added: + * List command-line history with `!history` + * Search command-line history with `!search` + * Clear history with `!clear` - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto configure. To switch away from auto use the new flag like `--precision=float32`. 
From 988ace80294daf4053c85cb2869f6805406fe899 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 16:39:36 -0400 Subject: [PATCH 05/30] add perlin noise to list of new features --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 2d981ff226..17eddf2d52 100644 --- a/README.md +++ b/README.md @@ -150,6 +150,8 @@ you can try starting `invoke.py` with the `--precision=float32` flag: and "embiggen" upscaling. See the `!fix` command. - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. + - New `--perlin` and `--threshold` options allow you to add and control variation + during image generation (see [docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options]) - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From 8ff9c69e2fda1bbd13bdf8a87dba4ec6f0fe41bc Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 16:41:05 -0400 Subject: [PATCH 06/30] fix link error --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 17eddf2d52..050686d5b0 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. - New `--perlin` and `--threshold` options allow you to add and control variation - during image generation (see [docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options]) + during image generation (see [docs/features/OTHER.md]) - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From 3bc1ff5e5a23d19aade515870d90dc09b322d79f Mon Sep 17 00:00:00 2001 From: Eric Wolf <19wolf@gmail.com> Date: Sun, 9 Oct 2022 16:48:50 -0400 Subject: [PATCH 07/30] fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 050686d5b0..256f604975 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ This is a fork of the open source text-to-image generator. It provides a streamlined process with various new features and options to aid the image generation process. It runs on Windows, Mac and Linux machines, with -GPU cards with as little as 4 GB or RAM. It provides both a polished +GPU cards with as little as 4 GB of RAM. It provides both a polished Web interface, and an easy-to-use command-line interface. _Note: This fork is rapidly evolving. 
Please use the From 7dae5fb1315279565b4453667cc7c461cf40e2ab Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 08:37:51 -0400 Subject: [PATCH 08/30] rebuild frontend --- frontend/dist/index.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/dist/index.html b/frontend/dist/index.html index 738f7b6922..0aa5cd8616 100644 --- a/frontend/dist/index.html +++ b/frontend/dist/index.html @@ -6,8 +6,8 @@ InvokeAI - A Stable Diffusion Toolkit - - + + From dc6e60cbcccaa3c9b6cbdbffa907db9aa457b628 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 1 Oct 2022 12:17:46 -0400 Subject: [PATCH 09/30] Update INPAINTING.md Changed Gimp instructions to indicate that partial transparency is better than full transparency. --- docs/features/INPAINTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/features/INPAINTING.md b/docs/features/INPAINTING.md index 32578a3dfc..38c7c8d397 100644 --- a/docs/features/INPAINTING.md +++ b/docs/features/INPAINTING.md @@ -38,8 +38,8 @@ We are hoping to get rid of the need for this workaround in an upcoming release. 2. Layer->Transparency->Add Alpha Channel 3. Use lasoo tool to select region to mask 4. Choose Select -> Float to create a floating selection -5. Open the Layers toolbar (++ctrl+l++) and select "Floating Selection" -6. Set opacity to 0% +5. Open the Layers toolbar (^L) and select "Floating Selection" +6. Set opacity to a value between 0% and 99% 7. Export as PNG 8. In the export dialogue, Make sure the "Save colour values from transparent pixels" checkbox is selected. From bc9471987bd193843d9f59ef5fb80340a506e487 Mon Sep 17 00:00:00 2001 From: rpagliuca Date: Sat, 1 Oct 2022 15:06:00 -0300 Subject: [PATCH 10/30] Update README.md Small writing error --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a108f4c172..aa778dcb56 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,12 @@ You wil need one of the following: - At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies. -#### Note +**Note** + +If you have a Nvidia 10xx series card (e.g. the 1080ti), please +run the dream script in full-precision mode as shown below. + +Similarly, specify full-precision mode on Apple M1 hardware. Precision is auto configured based on the device. If however you encounter errors like 'expected type Float but found Half' or 'not implemented for Half' From 58d0f14d033f2254b514f252711161203ec10dd7 Mon Sep 17 00:00:00 2001 From: Jim Hays Date: Fri, 30 Sep 2022 21:50:41 -0400 Subject: [PATCH 11/30] Update references to `lstein/stable-diffusion` to `invoke-ai/InvokeAI` --- docs/installation/INSTALL_MAC.md | 40 ++++++++++++++------------------ 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index 8345876476..0a40820288 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -95,10 +95,9 @@ While that is downloading, open a Terminal and run the following commands: ```{.bash .annotate title="local repo setup"} # clone the repo git clone https://github.com/invoke-ai/InvokeAI.git - cd InvokeAI -# wait until the checkpoint file has downloaded, then proceed +# Download the checkpoint file, and then proceed # create symlink to checkpoint mkdir -p models/ldm/stable-diffusion-v1/ @@ -172,13 +171,13 @@ python ./scripts/orig_scripts/txt2img.py \ ### Doesn't work anymore? 
-PyTorch nightly includes support for MPS. Because of this, this setup is -inherently unstable. One morning I woke up and it no longer worked no matter -what I did until I switched to miniforge. However, I have another Mac that works -just fine with Anaconda. If you can't get it to work, please search a little -first because many of the errors will get posted and solved. If you can't find a -solution please -[create an issue](https://github.com/invoke-ai/InvokeAI/issues). +PyTorch nightly includes support for MPS. Because of this, this setup +is inherently unstable. One morning I woke up and it no longer worked +no matter what I did until I switched to miniforge. However, I have +another Mac that works just fine with Anaconda. If you can't get it to +work, please search a little first because many of the errors will get +posted and solved. If you can't find a solution please [create an +issue](https://github.com/invoke-ai/InvokeAI/issues). One debugging step is to update to the latest version of PyTorch nightly. @@ -378,8 +377,8 @@ python scripts/preload_models.py WARNING: this will be slower than running natively on MPS. ``` -This fork already includes a fix for this in -[environment-mac.yml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yml). +The InvokeAI version includes this fix in +[environment-mac.yaml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yaml). ### "Could not build wheels for tokenizers" @@ -463,13 +462,10 @@ C. You don't have a virus. It's part of the project. Here's [Rick](https://github.com/invoke-ai/InvokeAI/blob/main/assets/rick.jpeg) -and here's -[the code](https://github.com/invoke-ai/InvokeAI/blob/69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc/scripts/txt2img.py#L79) -that swaps him in. It's a NSFW filter, which IMO, doesn't work very good (and we -call this "computer vision", sheesh). - -Actually, this could be happening because there's not enough RAM. You could try -the `model.half()` suggestion or specify smaller output images. +and here's [the +code](https://github.com/invoke-ai/InvokeAI/blob/69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc/scripts/txt2img.py#L79) +that swaps him in. It's a NSFW filter, which IMO, doesn't work very +good (and we call this "computer vision", sheesh). --- @@ -492,11 +488,9 @@ return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backen RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. ``` -Update to the latest version of invoke-ai/InvokeAI. We were patching -pytorch but we found a file in stable-diffusion that we could change instead. -This is a 32-bit vs 16-bit problem. - ---- +Update to the latest version of invoke-ai/InvokeAI. We were +patching pytorch but we found a file in stable-diffusion that we could +change instead. This is a 32-bit vs 16-bit problem. 
### The processor must support the Intel bla bla bla From 7a98387e8d874f70c4e3e59ac75121443b4d2e91 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 4 Oct 2022 13:32:06 +1100 Subject: [PATCH 12/30] Updates INSTALL_MAC.md --- docs/installation/INSTALL_MAC.md | 175 +++++++++++++------------------ 1 file changed, 72 insertions(+), 103 deletions(-) diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index 0a40820288..3d8a282a80 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -2,142 +2,111 @@ title: macOS --- -# :fontawesome-brands-apple: macOS +Invoke AI runs quite well on M1 Macs and we have a number of M1 users +in the community. + +While the repo does run on Intel Macs, we only have a couple +reports. If you have an Intel Mac and run into issues, please create +an issue on Github and we will do our best to help. ## Requirements - macOS 12.3 Monterey or later -- Python -- Patience -- Apple Silicon or Intel Mac +- About 10GB of storage (and 10GB of data if your internet connection has data caps) +- Any M1 Macs or an Intel Macs with 4GB+ of VRAM (ideally more) -Things have moved really fast and so these instructions change often which makes -them outdated pretty fast. One of the problems is that there are so many -different ways to run this. +## Installation -We are trying to build a testing setup so that when we make changes it doesn't -always break. +First you need to download a large checkpoint file. -## How to +1. Sign up at https://huggingface.co +2. Go to the [Stable diffusion diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) +3. Accept the terms and click Access Repository +4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder). You may want to move it somewhere else for longer term storage - SD needs this file to run. -(this hasn't been 100% tested yet) +While that is downloading, open Terminal and run the following commands one at a time, reading the comments and taking care to run the appropriate command for your Mac's architecture (Intel or M1). -First get the weights checkpoint download started since it's big and will take -some time: +Do not just copy and paste the whole thing into your terminal! -1. Sign up at [huggingface.co](https://huggingface.co) -2. Go to the - [Stable diffusion diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) -3. Accept the terms and click Access Repository: -4. Download - [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) - and note where you have saved it (probably the Downloads folder) +```bash +# Install brew (and Xcode command line tools): +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -While that is downloading, open a Terminal and run the following commands: +# Now there are two options to get the Python (miniconda) environment up and running: +# 1. Alongside pyenv +# 2. Standalone +# +# If you don't know what we are talking about, choose 2. +# +# If you are familiar with python environments, you'll know there are other options +# for setting up the environment - you are on your own if you go one of those routes. -!!! 
todo "Homebrew" +##### BEGIN TWO DIFFERENT OPTIONS ##### - === "no brew installation yet" +### BEGIN OPTION 1: Installing alongside pyenv ### +brew install pyenv-virtualenv # you might have this from before, no problem +pyenv install anaconda3-2022.05 +pyenv virtualenv anaconda3-2022.05 +eval "$(pyenv init -)" +pyenv activate anaconda3-2022.05 +### END OPTION 1 ### - ```bash title="install brew (and Xcode command line tools)" - /bin/bash -c \ - "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - ``` - === "brew is already installed" - - Only if you installed protobuf in a previous version of this tutorial, otherwise skip +### BEGIN OPTION 2: Installing standalone ### +# Install cmake, protobuf, and rust: +brew install cmake protobuf rust - `#!bash brew uninstall protobuf` +# BEGIN ARCHITECTURE-DEPENDENT STEP # +# For M1: install miniconda (M1 arm64 version): +curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh +/bin/bash Miniconda3-latest-MacOSX-arm64.sh -!!! todo "Conda Installation" +# For Intel: install miniconda (Intel x86-64 version): +curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o Miniconda3-latest-MacOSX-x86_64.sh +/bin/bash Miniconda3-latest-MacOSX-x86_64.sh +# END ARCHITECTURE-DEPENDENT STEP # - Now there are two different ways to set up the Python (miniconda) environment: - 1. Standalone - 2. with pyenv - If you don't know what we are talking about, choose Standalone +### END OPTION 2 ### - === "Standalone" +##### END TWO DIFFERENT OPTIONS ##### - ```bash - # install cmake and rust: - brew install cmake rust - ``` - - === "M1 arm64" - - ```bash title="Install miniconda for M1 arm64" - curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh \ - -o Miniconda3-latest-MacOSX-arm64.sh - /bin/bash Miniconda3-latest-MacOSX-arm64.sh - ``` - - === "Intel x86_64" - - ```bash title="Install miniconda for Intel" - curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh \ - -o Miniconda3-latest-MacOSX-x86_64.sh - /bin/bash Miniconda3-latest-MacOSX-x86_64.sh - ``` - - === "with pyenv" - - ```{.bash .annotate} - brew install rust pyenv-virtualenv # (1)! - pyenv install anaconda3-2022.05 - pyenv virtualenv anaconda3-2022.05 - eval "$(pyenv init -)" - pyenv activate anaconda3-2022.05 - ``` - - 1. You might already have this installed, if that is the case just continue. - -```{.bash .annotate title="local repo setup"} -# clone the repo +# Clone the Invoke AI repo git clone https://github.com/invoke-ai/InvokeAI.git cd InvokeAI -# Download the checkpoint file, and then proceed +### WAIT FOR THE CHECKPOINT FILE TO DOWNLOAD, THEN PROCEED ### +>>>>>>> Updates INSTALL_MAC.md -# create symlink to checkpoint +# We will leave the big checkpoint wherever you stashed it for long-term storage, +# and make a link to it from the repo's folder. This allows you to use it for +# other repos, and if you need to delete Invoke AI, you won't have to download it again. + +# Make the directory in the repo for the symlink mkdir -p models/ldm/stable-diffusion-v1/ -PATH_TO_CKPT="$HOME/Downloads" # (1)! +# This is the folder where you put the checkpoint file `sd-v1-4.ckpt` +PATH_TO_CKPT="$HOME/Downloads" -ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" \ - models/ldm/stable-diffusion-v1/model.ckpt -``` +# Create a link to the checkpoint +ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt -1. 
or wherever you saved sd-v1-4.ckpt +# BEGIN ARCHITECTURE-DEPENDENT STEP # +# For M1: Create the environment & install packages +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml -!!! todo "create Conda Environment" +# For Intel: Create the environment & install packages +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yaml +# END ARCHITECTURE-DEPENDENT STEP # - === "M1 arm64" +# Activate the environment (you need to do this every time you want to run SD) +conda activate ldm - ```bash - PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 \ - conda env create \ - -f environment-mac.yml \ - && conda activate ldm - ``` - - - === "Intel x86_64" - - ```bash - PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 \ - conda env create \ - -f environment-mac.yml \ - && conda activate ldm - ``` - -```{.bash .annotate title="preload models and run script"} -# only need to do this once +# This will download some bits and pieces and make take a while python scripts/preload_models.py -# now you can run SD in CLI mode -python scripts/invoke.py --full_precision # (1)! - +# Run SD! +python scripts/dream.py +``` # or run the web interface! python scripts/invoke.py --web From 2bdcc106f2528df9f3401671ad4e49a7b874e9a5 Mon Sep 17 00:00:00 2001 From: Marco Labarile <18102614+labarilem@users.noreply.github.com> Date: Tue, 4 Oct 2022 21:13:54 +0200 Subject: [PATCH 13/30] Fix markdown typo in WEB.md --- docs/features/CHANGELOG.md | 2 +- docs/features/WEB.md | 28 ++++++++++++++++++---------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/docs/features/CHANGELOG.md b/docs/features/CHANGELOG.md index c6fbf092e1..80ec5cf3a2 100644 --- a/docs/features/CHANGELOG.md +++ b/docs/features/CHANGELOG.md @@ -4,7 +4,7 @@ title: Changelog # :octicons-log-16: Changelog -## v1.13 (in process) +## v1.13 - Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516) diff --git a/docs/features/WEB.md b/docs/features/WEB.md index 9bc0d38e88..79f66314fa 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -1,21 +1,29 @@ --- -title: Barebones Web Server +title: InvokeAI Web Server --- -# :material-web: Barebones Web Server - -As of version 1.10, this distribution comes with a bare bones web server (see -screenshot). To use it, run the `invoke.py` script by adding the `--web` -option. +As of version 2.0.0, this distribution comes with a full-featured web +server (see screenshot). To use it, run the `invoke.py` script by +adding the `--web` option: ```bash -(ldm) ~/stable-diffusion$ python3 scripts/invoke.py --web +(ldm) ~/InvokeAI$ python3 scripts/invoke.py --web ``` You can then connect to the server by pointing your web browser at -http://localhost:9090, or to the network name or IP address of the server. +http://localhost:9090. To reach the server from a different machine on +your LAN, you may launch the web server with the `--host` argument and +either the IP address of the host you are running it on, or the +wildcard `0.0.0.0`. For example: -Kudos to [Tesseract Cat](https://github.com/TesseractCat) for contributing this -code, and to [dagf2101](https://github.com/dagf2101) for refining it. 
+```bash +(ldm) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0 +``` + +Kudos to [Psychedelicious](https://github.com/psychedelicious), +[BlessedCoolant](https://github.com/blessedcoolant), [Tesseract +Cat](https://github.com/TesseractCat), +[dagf2101](https://github.com/dagf2101), and many others who +contributed to this code. ![Dream Web Server](../assets/invoke_web_server.png) From b24d182237236cdb0fa25704c08ff7fb7b3275a8 Mon Sep 17 00:00:00 2001 From: rpagliuca Date: Sat, 1 Oct 2022 15:06:00 -0300 Subject: [PATCH 14/30] Update README.md Small writing error From 7da6fad359fbdfe72dbf47878fa1feab87b4abee Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 11:38:39 -0400 Subject: [PATCH 15/30] add missing doc files --- README.md | 13 +- docs/features/POSTPROCESS.md | 66 ++++----- docs/features/WEB.md | 265 ++++++++++++++++++++++++++++++++++- 3 files changed, 302 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index aa778dcb56..9606d30b9c 100644 --- a/README.md +++ b/README.md @@ -41,10 +41,13 @@ _This repository was formally known as lstein/stable-diffusion_ [latest release link]: https://github.com/invoke-ai/InvokeAI/releases -This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open -source text-to-image generator. It provides a streamlined process with various new features and -options to aid the image generation process. It runs on Windows, Mac and Linux machines, and runs on -GPU cards with as little as 4 GB or RAM. +This is a fork of +[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), +the open source text-to-image generator. It provides a streamlined +process with various new features and options to aid the image +generation process. It runs on Windows, Mac and Linux machines, with +GPU cards with as little as 4 GB or RAM. It provides both a polished +Web interface, and an easy-to-use command-line interface. _Note: This fork is rapidly evolving. Please use the [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature @@ -109,6 +112,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag: #### Major Features +- [Web Server](docs/features/WEB.md) - [Interactive Command Line Interface](docs/features/CLI.md) - [Image To Image](docs/features/IMG2IMG.md) - [Inpainting Support](docs/features/INPAINTING.md) @@ -116,7 +120,6 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - [Upscaling, face-restoration and outpainting](docs/features/POSTPROCESS.md) - [Seamless Tiling](docs/features/OTHER.md#seamless-tiling) - [Google Colab](docs/features/OTHER.md#google-colab) -- [Web Server](docs/features/WEB.md) - [Reading Prompts From File](docs/features/PROMPTS.md#reading-prompts-from-a-file) - [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds) - [Prompt Blending](docs/features/PROMPTS.md#prompt-blending) diff --git a/docs/features/POSTPROCESS.md b/docs/features/POSTPROCESS.md index fbcd1c8005..b5156f54f0 100644 --- a/docs/features/POSTPROCESS.md +++ b/docs/features/POSTPROCESS.md @@ -20,39 +20,33 @@ The default face restoration module is GFPGAN. The default upscale is Real-ESRGAN. For an alternative face restoration module, see [CodeFormer Support] below. -As of version 1.14, environment.yaml will install the Real-ESRGAN package into -the standard install location for python packages, and will put GFPGAN into a -subdirectory of "src" in the InvokeAI directory. 
(The reason for this is -that the standard GFPGAN distribution has a minor bug that adversely affects -image color.) Upscaling with Real-ESRGAN should "just work" without further -intervention. Simply pass the --upscale (-U) option on the invoke> command line, -or indicate the desired scale on the popup in the Web GUI. +As of version 1.14, environment.yaml will install the Real-ESRGAN +package into the standard install location for python packages, and +will put GFPGAN into a subdirectory of "src" in the InvokeAI +directory. Upscaling with Real-ESRGAN should "just work" without +further intervention. Simply pass the --upscale (-U) option on the +invoke> command line, or indicate the desired scale on the popup in +the Web GUI. -For **GFPGAN** to work, there is one additional step needed. You will need to -download and copy the GFPGAN -[models file](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth) -into **src/gfpgan/experiments/pretrained_models**. On Mac and Linux systems, -here's how you'd do it using **wget**: +**GFPGAN** requires a series of downloadable model files to +work. These are loaded when you run `scripts/preload_models.py`. If +GFPAN is failing with an error, please run the following from the +InvokeAI directory: -```bash -wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P src/gfpgan/experiments/pretrained_models/ -``` +~~~~ +python scripts/preload_models.py +~~~~ -Make sure that you're in the InvokeAI directory when you do this. +If you do not run this script in advance, the GFPGAN module will attempt +to download the models files the first time you try to perform facial +reconstruction. -Alternatively, if you have GFPGAN installed elsewhere, or if you are using an -earlier version of this package which asked you to install GFPGAN in a sibling -directory, you may use the `--gfpgan_dir` argument with `invoke.py` to set a -custom path to your GFPGAN directory. _There are other GFPGAN related boot -arguments if you wish to customize further._ - -!!! warning "Internet connection needed" - - Users whose GPU machines are isolated from the Internet (e.g. - on a University cluster) should be aware that the first time you run invoke.py with GFPGAN and - Real-ESRGAN turned on, it will try to download model files from the Internet. To rectify this, you - may run `python3 scripts/preload_models.py` after you have installed GFPGAN and all its - dependencies. +Alternatively, if you have GFPGAN installed elsewhere, or if you are +using an earlier version of this package which asked you to install +GFPGAN in a sibling directory, you may use the `--gfpgan_dir` argument +with `invoke.py` to set a custom path to your GFPGAN directory. _There +are other GFPGAN related boot arguments if you wish to customize +further._ ## Usage @@ -124,15 +118,15 @@ actions. This repo also allows you to perform face restoration using [CodeFormer](https://github.com/sczhou/CodeFormer). -In order to setup CodeFormer to work, you need to download the models like with -GFPGAN. You can do this either by running `preload_models.py` or by manually -downloading the -[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth) +In order to setup CodeFormer to work, you need to download the models +like with GFPGAN. 
You can do this either by running +`preload_models.py` or by manually downloading the [model +file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth) and saving it to `ldm/restoration/codeformer/weights` folder. -You can use `-ft` prompt argument to swap between CodeFormer and the default -GFPGAN. The above mentioned `-G` prompt argument will allow you to control the -strength of the restoration effect. +You can use `-ft` prompt argument to swap between CodeFormer and the +default GFPGAN. The above mentioned `-G` prompt argument will allow +you to control the strength of the restoration effect. ### Usage: diff --git a/docs/features/WEB.md b/docs/features/WEB.md index 79f66314fa..e634a83be2 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -20,10 +20,273 @@ wildcard `0.0.0.0`. For example: (ldm) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0 ``` +# Quick guided walkthrough of the WebGUI's features + +While most of the WebGUI's features are intuitive, here is a guided +walkthrough through its various components. + + + +The screenshot above shows the Text to Image tab of the WebGUI. There +are three main sections: + +1. A **control panel** on the left, which contains various settings +for text to image generation. The most important part is the text +field (currently showing `strawberry sushi`) for entering the text +prompt, and the camera icon directly underneath that will render the +image. We'll call this the *Invoke* button from now on. + +2. The **current image** section in the middle, which shows a large +format version of the image you are currently working on. A series of +buttons at the top ("image to image", "Use All", "Use Seed", etc) lets +you modify the image in various ways. + +3. A **gallery* section on the left that contains a history of the +images you have generated. These images are read and written to the +directory specified at launch time in `--outdir`. + +In addition to these three elements, there are a series of icons for +changing global settings, reporting bugs, and changing the theme on +the upper right. + +There are also a series of icons to the left of the control panel (see +highlighted area in the screenshot below) which select among a series +of tabs for performing different types of operations. + + + +From top to bottom, these are: + +1. Text to Image - generate images from text +2. Image to Image - from an uploaded starting image (drawing or photograph) generate a new one, modified by the text prompt +3. Inpainting (pending) - Interactively erase portions of a starting image and have the AI fill in the erased region from a text prompt. +4. Outpainting (pending) - Interactively add blank space to the borders of a starting image and fill in the background from a text prompt. +5. Postprocessing (pending) - Interactively postprocess generated images using a variety of filters. + +The inpainting, outpainting and postprocessing tabs are currently in +development. However, limited versions of their features can already +be accessed through the Text to Image and Image to Image tabs. + +## Walkthrough + +The following walkthrough will exercise most (but not all) of the +WebGUI's feature set. + +### Text to Image + +1. Launch the WebGUI using `python scripts/invoke.py --web` and +connect to it with your browser by accessing +`http://localhost:9090`. 
If the browser and server are running on +different machines on your LAN, add the option `--host 0.0.0.0` to the +launch command line and connect to the machine hosting the web server +using its IP address or domain name. + +2. If all goes well, the WebGUI should come up and you'll see a green +`connected` message on the upper right. + +#### Basics + +3. Generate an image by typing *strawberry sushi* into the large +prompt field on the upper left and then clicking on the Invoke button +(the one with the Camera icon). After a short wait, you'll see a large +image of sushi in the image panel, and a new thumbnail in the gallery +on the right. + +If you need more room on the screen, you can turn the gallery off +by clicking on the **x** to the right of "Your Invocations". You can +turn it back on later by clicking the image icon that appears in the +gallery's place. + +The images are written into the directory indicated by the `--outdir` +option provided at script launch time. By default, this is +`outputs/img-samples` under the InvokeAI directory. + +4. Generate a bunch of strawberry sushi images by increasing the +number of requested images by adjusting the Images counter just below +the Camera button. As each is generated, it will be added to the +gallery. You can switch the active image by clicking on the gallery +thumbnails. + +5. Try playing with different settings, including image width and +height, the Sampler, the Steps and the CFG scale. + +Image *Width* and *Height* do what you'd expect. However, be aware that +larger images consume more VRAM memory and take longer to generate. + +The *Sampler* controls how the AI selects the image to display. Some +samplers are more "creative" than others and will produce a wider +range of variations (see next section). Some samplers run faster than +others. + +*Steps* controls how many noising/denoising/sampling steps the AI will +take. The higher this value, the more refined the image will be, but +the longer the image will take to generate. A typical strategy is to +generate images with a low number of steps in order to select one to +work on further, and then regenerate it using a higher number of +steps. + +The *CFG Scale* controls how hard the AI tries to match the generated +image to the input prompt. You can go as high or low as you like, but +generally values greater than 20 won't improve things much, and values +lower than 5 will produce unexpected images. There are complex +interactions between *Steps*, *CFG Scale* and the *Sampler*, so +experiment to find out what works for you. + +6. To regenerate a previously-generated image, select the image you +want and click *Use All*. This loads the text prompt and other +original settings into the control panel. If you then press *Invoke* +it will regenerate the image exactly. You can also selectively modify +the prompt or other settings to tweak the image. + +Alternatively, you may click on *Use Seed* to load just the image's +seed, and leave other settings unchanged. + +7. To regenerate a Stable Diffusion image that was generated by +another SD package, you need to know its text prompt and its +*Seed*. Copy-paste the prompt into the prompt box, unset the +*Randomize Seed* control in the control panel, and copy-paste the +desired *Seed* into its text field. When you Invoke, you will get +something similar to the original image. It will not be exact unless +you also set the correct values for the original sampler, CFG, +steps and dimensions, but it will (usually) be close. 
+ +#### Variations on a theme + +5. Let's try generating some variations. Select your favorite sushi +image from the gallery to load it. Then select "Use All" from the list +of buttons above. This will load up all the settings used to generate +this image, including its unique seed. + +Go down to the Variations section of the Control Panel and set the +button to On. Set Variation Amount to 0.2 to generate a modest +number of variations on the image, and also set the Image counter to +4. Press the `invoke` button. This will generate a series of related +images. To obtain smaller variations, just lower the Variation +Amount. You may also experiment with changing the Sampler. Some +samplers generate more variability than others. *k_euler_a* is +particularly creative, while *ddim* is pretty conservative. + +6. For even more variations, experiment with increasing the setting +for *Perlin*. This adds a bit of noise to the image generation +process. Note that values of Perlin noise greater than 0.15 produce +poor images for several of the samplers. + +#### Facial reconstruction and upscaling + +Stable Diffusion frequently produces mangled faces, particularly when +there are multiple figures in the same scene. Stable Diffusion has +particular issues with generating reallistic eyes. InvokeAI provides +the ability to reconstruct faces using either the GFPGAN or CodeFormer +libraries. For more information see [POSTPROCESS](POSTPROCESS.md). + +7. Invoke a prompt that generates a mangled face. A prompt that often +gives this is "portrait of a lawyer, 3/4 shot" (this is not intended +as a slur against lawyers!) Once you have an image that needs some +touching up, load it into the Image panel, and press the button with +the face icon (highlighted in the first screenshot below). A dialog +box will appear. Leave *Strength* at 0.8 and press *Restore Faces". If +all goes well, the eyes and other aspects of the face will be improved +(see the second screenshot) + + + + +The facial reconstruction *Strength* field adjusts how aggressively +the face library will try to alter the face. It can be as high as 1.0, +but be aware that this often softens the face airbrush style, losing +some details. The default 0.8 is usually sufficient. + +8. "Upscaling" is the process of increasing the size of an image while +retaining the sharpness. InvokeAI uses an external library called +"ESRGAN" to do this. To invoke upscaling, simply select an image and +press the *HD* button above it. You can select between 2X and 4X +upscaling, and adjust the upscaling strength, which has much the same +meaning as in facial reconstruction. Try running this on one of your +previously-generated images. + +9. Finally, you can run facial reconstruction and/or upscaling +automatically after each Invocation. Go to the Advanced Options +section of the Control Panel and turn on *Restore Face* and/or +*Upscale*. + +### Image to Image + +InvokeAI lets you take an existing image and use it as the basis for a +new creation. You can use any sort of image, including a photograph, a +scanned sketch, or a digital drawing, as long as it is in PNG or JPEG +format. + +For this tutorial, we'll use files named +[Lincoln-and-Parrot-512.png](../assets/Lincoln-and-Parrot-512.png), +and +[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png). +Download these images to your local machine now to continue with the walkthrough. + +10. 
Click on the *Image to Image* tab icon, which is the second icon +from the top on the left-hand side of the screen: + + + +This will bring you to a screen similar to the one shown here: + + + +Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or +click the blank area to get an upload dialog. The image will load into +an area marked *Initial Image*. (The WebGUI will also load the most +recently-generated image from the gallery into a section on the left, +but this image will be replaced in the next step.) + +11. Go to the prompt box and type *old sea captain with raven on +shoulder* and press Invoke. A derived image will appear to the right +of the original one: + + + +12. Experiment with the different settings. The most influential one +in Image to Image is *Image to Image Strength* located about midway +down the control panel. By default it is set to 0.75, but can range +from 0.0 to 0.99. The higher the value, the more of the original image +the AI will replace. A value of 0 will leave the initial image +completely unchanged, while 0.99 will replace it completely. However, +the Sampler and CFG Scale also influence the final result. You can +also generate variations in the same way as described in Text to +Image. + +13. What if we only want to change certain part(s) of the image and +leave the rest intact? This is called Inpainting, and a future version +of the InvokeAI web server will provide an interactive painting canvas +on which you can directly draw the areas you wish to Inpaint into. For +now, you can achieve this effect by using an external photoeditor tool +to make one or more regions of the image transparent as described in +[INPAINTING.md] and uploading that. + +The file +[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png) +is a version of the earlier image in which the area around the parrot +has been replaced with transparency. Click on the "x" in the upper +right of the Initial Image and upload the transparent version. Using +the same prompt "old sea captain with raven on shoulder" try Invoking +an image. This time, only the parrot will be replaced, leaving the +rest of the original image intact: + + + +## Parting remarks + +This concludes the walkthrough, but there are several more features that you +can explore. Please check out the [Command Line Interface](CLI.md) +documentation for further explanation of the advanced features that +were not covered here. + +The WebGUI is only rapid development. Check back regularly for +updates! + +## Credits + Kudos to [Psychedelicious](https://github.com/psychedelicious), [BlessedCoolant](https://github.com/blessedcoolant), [Tesseract Cat](https://github.com/TesseractCat), [dagf2101](https://github.com/dagf2101), and many others who contributed to this code. -![Dream Web Server](../assets/invoke_web_server.png) From 08a0b85111126619643c8a8565a790902bfcc214 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 11:42:03 -0400 Subject: [PATCH 16/30] fix image links in documentation --- docs/features/WEB.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/features/WEB.md b/docs/features/WEB.md index e634a83be2..533fb447b7 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -25,7 +25,7 @@ wildcard `0.0.0.0`. For example: While most of the WebGUI's features are intuitive, here is a guided walkthrough through its various components. - + The screenshot above shows the Text to Image tab of the WebGUI. 
There are three main sections: @@ -53,7 +53,7 @@ There are also a series of icons to the left of the control panel (see highlighted area in the screenshot below) which select among a series of tabs for performing different types of operations. - + From top to bottom, these are: From 40894d67ac15b4665287fa0bae7f812c78e5b0aa Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 11:42:59 -0400 Subject: [PATCH 17/30] fixup image sizes in WEB.md --- docs/features/WEB.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/features/WEB.md b/docs/features/WEB.md index 533fb447b7..d2e61f46b5 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -25,7 +25,7 @@ wildcard `0.0.0.0`. For example: While most of the WebGUI's features are intuitive, here is a guided walkthrough through its various components. - + The screenshot above shows the Text to Image tab of the WebGUI. There are three main sections: @@ -53,7 +53,7 @@ There are also a series of icons to the left of the control panel (see highlighted area in the screenshot below) which select among a series of tabs for performing different types of operations. - + From top to bottom, these are: From 400f062771b35578550a23e90737d451236de4a3 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 11:43:42 -0400 Subject: [PATCH 18/30] make initial screenshot even larger --- docs/features/WEB.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/features/WEB.md b/docs/features/WEB.md index d2e61f46b5..e307c60394 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -25,7 +25,7 @@ wildcard `0.0.0.0`. For example: While most of the WebGUI's features are intuitive, here is a guided walkthrough through its various components. - + The screenshot above shows the Text to Image tab of the WebGUI. There are three main sections: From d1dd35a1d21f7701d2ffdb29f7651690a6a1fc1b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 11:45:16 -0400 Subject: [PATCH 19/30] final tweak to embedded screenshots in WEB.md --- docs/features/WEB.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/features/WEB.md b/docs/features/WEB.md index e307c60394..1ca2ffdc2e 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -229,7 +229,7 @@ from the top on the left-hand side of the screen: This will bring you to a screen similar to the one shown here: - + Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click the blank area to get an upload dialog. The image will load into @@ -241,7 +241,7 @@ but this image will be replaced in the next step.) shoulder* and press Invoke. A derived image will appear to the right of the original one: - + 12. Experiment with the different settings. The most influential one in Image to Image is *Image to Image Strength* located about midway @@ -270,7 +270,7 @@ the same prompt "old sea captain with raven on shoulder" try Invoking an image. 
This time, only the parrot will be replaced, leaving the rest of the original image intact: - + ## Parting remarks From 1b6fab59a45ed8ceba998aef36a38bafd6e5d8c5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 15:29:04 -0400 Subject: [PATCH 20/30] run make_schedule() if it hasn't already been called - fixes #984 --- ldm/models/diffusion/ksampler.py | 8 ++++++++ ldm/models/diffusion/sampler.py | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py index a947296f9e..1e44fe1fdf 100644 --- a/ldm/models/diffusion/ksampler.py +++ b/ldm/models/diffusion/ksampler.py @@ -171,6 +171,14 @@ class KSampler(Sampler): if img_callback is not None: img_callback(k_callback_values['x'],k_callback_values['i']) + # if make_schedule() hasn't been called, we do it now + if self.sigmas is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + # sigmas are set up in make_schedule - we take the last steps items total_steps = len(self.sigmas) sigmas = self.sigmas[-S-1:] diff --git a/ldm/models/diffusion/sampler.py b/ldm/models/diffusion/sampler.py index 8b19f894e5..88cdc01974 100644 --- a/ldm/models/diffusion/sampler.py +++ b/ldm/models/diffusion/sampler.py @@ -20,6 +20,7 @@ from ldm.modules.diffusionmodules.util import ( class Sampler(object): def __init__(self, model, schedule='linear', steps=None, device=None, **kwargs): self.model = model + self.ddim_timesteps = None self.ddpm_num_timesteps = steps self.schedule = schedule self.device = device or choose_torch_device() @@ -157,6 +158,14 @@ class Sampler(object): **kwargs, ): + # check to see if make_schedule() has run, and if not, run it + if self.ddim_timesteps is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + ts = self.get_timesteps(S) # sampling From bc8e86e643c1526721bd35a0d1c916aad4167241 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 15:33:56 -0400 Subject: [PATCH 21/30] fix environment-mac.yml as per #964 --- docs/installation/INSTALL_MAC.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index 3d8a282a80..99d06d4eb0 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -92,10 +92,10 @@ ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt # BEGIN ARCHITECTURE-DEPENDENT STEP # # For M1: Create the environment & install packages -PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml # For Intel: Create the environment & install packages -PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yaml +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yml # END ARCHITECTURE-DEPENDENT STEP # # Activate the environment (you need to do this every time you want to run SD) @@ -347,7 +347,7 @@ python scripts/preload_models.py ``` The InvokeAI version includes this fix in -[environment-mac.yaml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yaml). +[environment-mac.yml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yml). 
From 8ce3d4dd7f81ed7aff2472a57379325cc7afa1c1 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 9 Oct 2022 16:36:00 -0400
Subject: [PATCH 23/30] add short list of 2.0.0 new features

---
 README.md | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9606d30b9c..2d981ff226 100644
--- a/README.md
+++ b/README.md
@@ -136,8 +136,27 @@ you can try starting `invoke.py` with the `--precision=float32` flag:

 ### Latest Changes

-- vNEXT (TODO 2022)
+- v2.0.0 (9 October 2022)
+  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
+    for backward compatibility.
+  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
+  - Support for inpainting and outpainting
+  - img2img runs on all k* samplers
+  - Support for CodeFormer face reconstruction
+  - Support for Textual Inversion on Macintoshes
+  - Support in both WebGUI and CLI for post-processing of previously-generated images
+    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
+    and "embiggen" upscaling. See the `!fix` command.
+  - New `--hires` option on `invoke>` line allows larger images to be created without
+    duplicating elements, at the cost of some performance.
+  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
+    and tweaking of previous settings.
+  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
+  - Improved command-line completion behavior and new commands added:
+    * List command-line history with `!history`
+    * Search command-line history with `!search`
+    * Clear history with `!clear`
 - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
   configure. To switch away from auto use the new flag like `--precision=float32`.
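The changelog above mentions two launch-time options that can be tried directly. A minimal sketch of both, assuming the conda environment is already activated and the commands are run from the repository root:

```bash
# Launch the new WebGUI instead of the interactive CLI:
python3 scripts/invoke.py --web

# Replace the deprecated --full_precision / -F flag with the new explicit
# precision flag when the automatic configuration must be overridden:
python3 scripts/invoke.py --precision=float32
```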
From 4fbd11a1f2523af88798e6ef94efefdf3940bc07 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 16:39:36 -0400 Subject: [PATCH 24/30] add perlin noise to list of new features --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 2d981ff226..17eddf2d52 100644 --- a/README.md +++ b/README.md @@ -150,6 +150,8 @@ you can try starting `invoke.py` with the `--precision=float32` flag: and "embiggen" upscaling. See the `!fix` command. - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. + - New `--perlin` and `--threshold` options allow you to add and control variation + during image generation (see [docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options]) - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From a4c36dbc150a0746d88a22765301073fa23d5d37 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 16:41:05 -0400 Subject: [PATCH 25/30] fix link error --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 17eddf2d52..050686d5b0 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. - New `--perlin` and `--threshold` options allow you to add and control variation - during image generation (see [docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options]) + during image generation (see [docs/features/OTHER.md]) - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From 49380f75a9b884c4c92f604d4a8c0189f5f9afce Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 18:25:18 -0400 Subject: [PATCH 26/30] frontend rebuild --- frontend/dist/index.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/dist/index.html b/frontend/dist/index.html index 7fac16ba9e..121585d12c 100644 --- a/frontend/dist/index.html +++ b/frontend/dist/index.html @@ -15,4 +15,4 @@ - + \ No newline at end of file From 7325b7307371cfd173f36dd9aec87e735cc1dc23 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 18:41:57 -0400 Subject: [PATCH 27/30] add more features to changelog --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 256f604975..2ef38470ac 100644 --- a/README.md +++ b/README.md @@ -141,17 +141,17 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for backward compatibility. - Completely new WebGUI - launch with `python3 scripts/invoke.py --web` - - Support for inpainting and outpainting + - Support for inpainting and outpainting - img2img runs on all k* samplers + - Support for post-processing of previously-generated images using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen" upscaling. See the `!fix` command. 
- - New `--hires` option on `invoke>` line allows larger images to be created without - duplicating elements, at the cost of some performance. + - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. - New `--perlin` and `--threshold` options allow you to add and control variation - during image generation (see [docs/features/OTHER.md]) + during image generation (see Thresholding and Perlin Noise Initialization - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From 0c479cd7069081a592db3dd04afb17118620b48d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 18:43:09 -0400 Subject: [PATCH 28/30] fix typos --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2ef38470ac..542418a480 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - Completely new WebGUI - launch with `python3 scripts/invoke.py --web` - Support for inpainting and outpainting - img2img runs on all k* samplers - - Support for negative prompts - Support for CodeFormer face reconstruction - Support for Textual Inversion on Macintoshes - Support in both WebGUI and CLI for post-processing of previously-generated images @@ -151,7 +151,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag: and "embiggen" upscaling. See the `!fix` command. - New `--hires` option on `invoke>` line allows larger images to be created without duplicating elements, at the cost of some performance. - New `--perlin` and `--threshold` options allow you to add and control variation - during image generation (see Thresholding and Perlin Noise Initialization + during image generation (see Thresholding and Perlin Noise Initialization - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. From be5bf03cccd7c48cb5cb35a4edfaac45ce9969b2 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 9 Oct 2022 18:44:31 -0400 Subject: [PATCH 29/30] add links for history processing --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 542418a480..385d51254f 100644 --- a/README.md +++ b/README.md @@ -155,7 +155,8 @@ you can try starting `invoke.py` with the `--precision=float32` flag: - Extensive metadata now written into PNG files, allowing reliable regeneration of images and tweaking of previous settings. - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms. - - Improved command-line completion behavior and new commands added: + - Improved command-line completion behavior. 
+    New commands added:
     * List command-line history with `!history`
     * Search command-line history with `!search`
     * Clear history with `!clear`

From 275dca83be3757415c4e0217a55c3cf581380f07 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 9 Oct 2022 18:46:23 -0400
Subject: [PATCH 30/30] Update OUTPAINTING.md fix typo

---
 docs/features/OUTPAINTING.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/features/OUTPAINTING.md b/docs/features/OUTPAINTING.md
index 7d54f1bfc9..6d30b59cac 100644
--- a/docs/features/OUTPAINTING.md
+++ b/docs/features/OUTPAINTING.md
@@ -70,7 +70,7 @@ additional 64 pixels to the top of the image:
 invoke> !fix images/curly.png --out_direction top 64
 ~~~

-(you can abbreviate ``--out_direction` as `-D`.
+(you can abbreviate `--out_direction` as `-D`).

 The result is shown here:
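Since the corrected documentation line states that `--out_direction` can be abbreviated as `-D`, the two spellings of the outcropping command are interchangeable. A short sketch reusing the document's own example image:

```bash
# Extend images/curly.png by an additional 64 pixels at the top;
# the two invocations below are equivalent.
invoke> !fix images/curly.png --out_direction top 64
invoke> !fix images/curly.png -D top 64
```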