From d87bd29a6862996d8a0980c1343b6f0d4eb718b4 Mon Sep 17 00:00:00 2001 From: bsilvereagle Date: Mon, 26 Sep 2022 01:02:21 -0400 Subject: [PATCH 01/14] Change to InvokeAI git repo and folder names --- docs/installation/INSTALL_WINDOWS.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/installation/INSTALL_WINDOWS.md b/docs/installation/INSTALL_WINDOWS.md index 238988a15a..a6a733e8b5 100644 --- a/docs/installation/INSTALL_WINDOWS.md +++ b/docs/installation/INSTALL_WINDOWS.md @@ -33,15 +33,15 @@ in the wiki 4. Run the command: ``` -git clone https://github.com/lstein/stable-diffusion.git +git clone https://github.com/invoke-ai/InvokeAI.git ``` This will create stable-diffusion folder where you will follow the rest of the steps. -5. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory! +5. Enter the newly-created InvokeAI folder. From this step forward make sure that you are working in the InvokeAI directory! ``` -cd stable-diffusion +cd InvokeAI ``` 6. Run the following two commands: @@ -74,7 +74,7 @@ Note: This step is required. This was done because some users may might be block - The weight file is >4 GB in size, so downloading may take a while. -Now run the following commands from **within the stable-diffusion directory** to copy the weights file to the right place: +Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place: ``` mkdir -p models\ldm\stable-diffusion-v1 @@ -94,7 +94,7 @@ python scripts\dream.py -l python scripts\dream.py ``` -10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3),enter the stable-diffusion directory (step 5, `cd \path\to\stable-diffusion`), run `conda activate ldm` (step 6b), and then launch the dream script (step 9). +10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3),enter the InvokeAI directory (step 5, `cd \path\to\InvokeAI`), run `conda activate ldm` (step 6b), and then launch the dream script (step 9). **Note:** Tildebyte has written an alternative ["Easy peasy Windows install"](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) @@ -102,7 +102,7 @@ which uses the Windows Powershell and pew. If you are having trouble with Anacon ### Updating to newer versions of the script -This distribution is changing rapidly. If you used the `git clone` method (step 5) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter `stable-diffusion`, and type: +This distribution is changing rapidly. 
If you used the `git clone` method (step 5) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI`, and type: ``` git pull From b1a3fd945d1863a547b4375a094f1eca091986b2 Mon Sep 17 00:00:00 2001 From: Chris Hayes <6013871+Christopher-Hayes@users.noreply.github.com> Date: Tue, 27 Sep 2022 00:58:05 -0400 Subject: [PATCH 02/14] Rename environment files to use default .yml extension --- .github/workflows/create-caches.yml | 4 ++-- .github/workflows/test-dream-conda.yml | 4 ++-- docs/CHANGELOG.md | 4 ++-- docs/README-CompViz.md | 2 +- docs/features/UPSCALE.md | 2 +- docs/help/TROUBLESHOOT.md | 6 +++--- docs/installation/INSTALL_LINUX.md | 2 +- docs/installation/INSTALL_MAC.md | 10 +++++----- docs/installation/INSTALL_WINDOWS.md | 6 +++--- environment-mac.yaml => environment-mac.yml | 2 +- environment.yaml => environment.yml | 0 11 files changed, 21 insertions(+), 21 deletions(-) rename environment-mac.yaml => environment-mac.yml (96%) rename environment.yaml => environment.yml (100%) diff --git a/.github/workflows/create-caches.yml b/.github/workflows/create-caches.yml index 951718af1b..e21286a407 100644 --- a/.github/workflows/create-caches.yml +++ b/.github/workflows/create-caches.yml @@ -13,10 +13,10 @@ jobs: id: vars run: | if [ "$RUNNER_OS" = "macOS" ]; then - echo "::set-output name=ENV_FILE::environment-mac.yaml" + echo "::set-output name=ENV_FILE::environment-mac.yml" echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python" elif [ "$RUNNER_OS" = "Linux" ]; then - echo "::set-output name=ENV_FILE::environment.yaml" + echo "::set-output name=ENV_FILE::environment.yml" echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python" fi - name: Checkout sources diff --git a/.github/workflows/test-dream-conda.yml b/.github/workflows/test-dream-conda.yml index 3bd9b24582..278bf1b57d 100644 --- a/.github/workflows/test-dream-conda.yml +++ b/.github/workflows/test-dream-conda.yml @@ -19,10 +19,10 @@ jobs: run: | # Note, can't "activate" via github action; specifying the env's python has the same effect if [ "$RUNNER_OS" = "macOS" ]; then - echo "::set-output name=ENV_FILE::environment-mac.yaml" + echo "::set-output name=ENV_FILE::environment-mac.yml" echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python" elif [ "$RUNNER_OS" = "Linux" ]; then - echo "::set-output name=ENV_FILE::environment.yaml" + echo "::set-output name=ENV_FILE::environment.yml" echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python" fi - name: Checkout sources diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 31dc8e80db..f0d61aeb97 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -119,14 +119,14 @@ - A copy of the prompt and all of its switches and options is now stored in the corresponding image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py, or an image editor that allows you to explore the full metadata. - **Please run "conda env update -f environment.yaml" to load the k_lms dependencies!!** + **Please run "conda env update" to load the k_lms dependencies!!** --- ## v1.01 (21 August 2022) - added k_lms sampling. 
- **Please run "conda env update -f environment.yaml" to load the k_lms dependencies!!** + **Please run "conda env update" to load the k_lms dependencies!!** - use half precision arithmetic by default, resulting in faster execution and lower memory requirements Pass argument --full_precision to dream.py to get slower but more accurate image generation diff --git a/docs/README-CompViz.md b/docs/README-CompViz.md index ed7df6a4ea..4780bfeaa4 100644 --- a/docs/README-CompViz.md +++ b/docs/README-CompViz.md @@ -28,7 +28,7 @@ A suitable [conda](https://conda.io/) environment named `ldm` can be created and activated with: ``` -conda env create -f environment.yaml +conda env create conda activate ldm ``` diff --git a/docs/features/UPSCALE.md b/docs/features/UPSCALE.md index 381d2b90ec..d549eff443 100644 --- a/docs/features/UPSCALE.md +++ b/docs/features/UPSCALE.md @@ -3,7 +3,7 @@ The script also provides the ability to do face restoration and upscaling with the help of GFPGAN and Real-ESRGAN respectively. -As of version 1.14, environment.yaml will install the Real-ESRGAN package into the +As of version 1.14, environment.yml will install the Real-ESRGAN package into the standard install location for python packages, and will put GFPGAN into a subdirectory of "src" in the stable-diffusion directory. (The reason for this is that the standard GFPGAN distribution has a minor bug that adversely affects image diff --git a/docs/help/TROUBLESHOOT.md b/docs/help/TROUBLESHOOT.md index cac5dddf23..a9e819dd7d 100644 --- a/docs/help/TROUBLESHOOT.md +++ b/docs/help/TROUBLESHOOT.md @@ -7,7 +7,7 @@ install process. **QUESTION** -During `conda env create -f environment.yaml`, conda hangs indefinitely. +During `conda env create`, conda hangs indefinitely. **SOLUTION** @@ -31,7 +31,7 @@ Reinstall the stable diffusion modules. Enter the `stable-diffusion` directory a **SOLUTION** -From within the `stable-diffusion` directory, run `conda env update -f environment.yaml` This is also frequently the solution to +From within the `stable-diffusion` directory, run `conda env update` This is also frequently the solution to complaints about an unknown function in a module. --- @@ -46,7 +46,7 @@ There's a feature or bugfix in the Stable Diffusion GitHub that you want to try If the fix/feature is on the `main` branch, enter the stable-diffusion directory and do a `git pull`. -Usually this will be sufficient, but if you start to see errors about missing or incorrect modules, use the command `pip install -e .` and/or `conda env update -f environment.yaml` (These commands won't break anything.) +Usually this will be sufficient, but if you start to see errors about missing or incorrect modules, use the command `pip install -e .` and/or `conda env update` (These commands won't break anything.) **Sub Branch** diff --git a/docs/installation/INSTALL_LINUX.md b/docs/installation/INSTALL_LINUX.md index 2a64f1eb41..9e7df04663 100644 --- a/docs/installation/INSTALL_LINUX.md +++ b/docs/installation/INSTALL_LINUX.md @@ -34,7 +34,7 @@ This will create InvokeAI folder where you will follow the rest of the steps. 5. Use anaconda to copy necessary python packages, create a new python environment named `ldm` and activate the environment. 
``` -(base) ~/InvokeAI$ conda env create -f environment.yaml +(base) ~/InvokeAI$ conda env create (base) ~/InvokeAI$ conda activate ldm (ldm) ~/InvokeAI$ ``` diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index c000e818bb..29687f6736 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -76,7 +76,7 @@ PATH_TO_CKPT="$HOME/Downloads" # or wherever you saved sd-v1-4.ckpt ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt # install packages -PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml conda activate ldm # only need to do this once @@ -93,7 +93,7 @@ python scripts/orig_scripts/txt2img.py --prompt "a photograph of an astronaut ri ``` Note, `export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env -create -f environment-mac.yaml` never finishing in some situations. So +create -f environment-mac.yml` never finishing in some situations. So it isn't required but wont hurt. After you follow all the instructions and run dream.py you might get several errors. Here's the errors I've seen and found solutions for. @@ -112,7 +112,7 @@ One debugging step is to update to the latest version of PyTorch nightly. conda install pytorch torchvision torchaudio -c pytorch-nightly -If `conda env create -f environment-mac.yaml` takes forever run this. +If `conda env create -f environment-mac.yml` takes forever run this. git clean -f @@ -140,7 +140,7 @@ Third, if it says you're missing taming you need to rebuild your virtual environment. conda env remove -n ldm - conda env create -f environment-mac.yaml + conda env create -f environment-mac.yml Fourth, If you have activated the ldm virtual environment and tried rebuilding it, maybe the problem could be that I have something installed that you don't and you'll just need to manually install it. Make sure you activate the virtual environment so it installs there instead of globally. @@ -239,7 +239,7 @@ Example error. NotImplementedError: The operator 'aten::_index_put_impl_' is not current implemented for the MPS device. If you want this op to be added in priority during the prototype phase of this feature, please comment on [https://github.com/pytorch/pytorch/issues/77764](https://github.com/pytorch/pytorch/issues/77764). As a temporary fix, you can set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be slower than running natively on MPS. ``` -The lstein branch includes this fix in [environment-mac.yaml](https://github.com/lstein/stable-diffusion/blob/main/environment-mac.yaml). +The lstein branch includes this fix in [environment-mac.yml](https://github.com/lstein/stable-diffusion/blob/main/environment-mac.yml). ### "Could not build wheels for tokenizers" diff --git a/docs/installation/INSTALL_WINDOWS.md b/docs/installation/INSTALL_WINDOWS.md index a6a733e8b5..439e3a7e79 100644 --- a/docs/installation/INSTALL_WINDOWS.md +++ b/docs/installation/INSTALL_WINDOWS.md @@ -47,8 +47,8 @@ cd InvokeAI 6. Run the following two commands: ``` -conda env create -f environment.yaml (step 6a) -conda activate ldm (step 6b) +conda env create (step 6a) +conda activate ldm (step 6b) ``` This will install all python requirements and activate the "ldm" @@ -106,7 +106,7 @@ This distribution is changing rapidly. 
If you used the `git clone` method (step ``` git pull -conda env update -f environment.yaml +conda env update ``` This will bring your local copy into sync with the remote one. diff --git a/environment-mac.yaml b/environment-mac.yml similarity index 96% rename from environment-mac.yaml rename to environment-mac.yml index 8067c712ee..f5fa542461 100644 --- a/environment-mac.yaml +++ b/environment-mac.yml @@ -14,7 +14,7 @@ dependencies: # To determine what the latest versions should be, run: # # ```shell - # sed -E 's/ldm/ldm-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yaml > environment-mac-updated.yml + # sed -E 's/ldm/ldm-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n ldm-updated | awk ' {print " - " $1 "==" $2;} ' # ``` - albumentations==1.2.1 diff --git a/environment.yaml b/environment.yml similarity index 100% rename from environment.yaml rename to environment.yml From 61790bb76a5260beb4e49e111bad5a37faefa470 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Sep 2022 14:25:27 +1000 Subject: [PATCH 03/14] Fixes #822 --- backend/server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/backend/server.py b/backend/server.py index 5ee28f20f3..622a78fc7c 100644 --- a/backend/server.py +++ b/backend/server.py @@ -672,9 +672,10 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters) and step < generation_parameters["steps"] - 1 ): image = generate.sample_to_image(sample) - path = save_image( - image, generation_parameters, intermediate_path, step_index - ) + + metadata = parameters_to_generated_image_metadata(generation_parameters) + command = parameters_to_command(generation_parameters) + path = save_image(image, command, metadata, intermediate_path, step_index=step_index, postprocessing=False) step_index += 1 socketio.emit( From 52b952526e1b2326a6f3722e14a15aa6c84c55a2 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Sep 2022 15:29:27 +1000 Subject: [PATCH 04/14] Fixes metadata arg value --- backend/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/server.py b/backend/server.py index 622a78fc7c..408fcf0f4c 100644 --- a/backend/server.py +++ b/backend/server.py @@ -683,7 +683,7 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters) { "url": os.path.relpath(path), "mtime": os.path.getmtime(path), - "metadata": generation_parameters, + "metadata": metadata, }, ) socketio.emit("progressUpdate", progress) From 79f23ad03159aca50b77d098b2638e86d522aee9 Mon Sep 17 00:00:00 2001 From: bsilvereagle Date: Mon, 26 Sep 2022 22:40:50 -0400 Subject: [PATCH 05/14] stable-diffusion -> InvokeAI --- docs/features/UPSCALE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/features/UPSCALE.md b/docs/features/UPSCALE.md index f4c06f8b42..1d9e7e0335 100644 --- a/docs/features/UPSCALE.md +++ b/docs/features/UPSCALE.md @@ -17,7 +17,7 @@ Support] below. As of version 1.14, environment.yaml will install the Real-ESRGAN package into the standard install location for python packages, and will put GFPGAN into a -subdirectory of "src" in the stable-diffusion directory. (The reason for this is +subdirectory of "src" in the InvokeAI directory. 
(The reason for this is that the standard GFPGAN distribution has a minor bug that adversely affects image color.) Upscaling with Real-ESRGAN should "just work" without further intervention. Simply pass the --upscale (-U) option on the dream> command line, @@ -33,7 +33,7 @@ here's how you'd do it using **wget**: > wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth src/gfpgan/experiments/pretrained_models/ ``` -Make sure that you're in the stable-diffusion directory when you do this. +Make sure that you're in the InvokeAI directory when you do this. Alternatively, if you have GFPGAN installed elsewhere, or if you are using an earlier version of this package which asked you to install GFPGAN in a sibling From cad237b4c803fe4e9559f8082d08451b8b1831ff Mon Sep 17 00:00:00 2001 From: ArDiouscuros <72071512+ArDiouscuros@users.noreply.github.com> Date: Tue, 27 Sep 2022 08:59:16 +0200 Subject: [PATCH 06/14] Update documents, add new options information --- docs/features/CLI.md | 2 ++ docs/help/TROUBLESHOOT.md | 10 ++++++++++ docs/other/CONTRIBUTORS.md | 1 + 3 files changed, 13 insertions(+) diff --git a/docs/features/CLI.md b/docs/features/CLI.md index 89b872d46b..4a1580fc0d 100644 --- a/docs/features/CLI.md +++ b/docs/features/CLI.md @@ -98,6 +98,8 @@ overridden on a per-prompt basis (see [List of prompt arguments] | `--gfpgan_dir` | | `src/gfpgan` | Path to where GFPGAN is installed. | | `--gfpgan_model_path` | | `experiments/pretrained_models/GFPGANv1.4.pth` | Path to GFPGAN model file, relative to `--gfpgan_dir`. | | `--device ` | `-d` | `torch.cuda.current_device()` | Device to run SD on, e.g. "cuda:0" | +| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions | +| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast | #### deprecated diff --git a/docs/help/TROUBLESHOOT.md b/docs/help/TROUBLESHOOT.md index 0d57514400..84c62ab3c0 100644 --- a/docs/help/TROUBLESHOOT.md +++ b/docs/help/TROUBLESHOOT.md @@ -116,3 +116,13 @@ branch that contains the pull request: You will need to go through the install procedure again, but it should be fast because all the dependencies are already loaded. + +--- + +### **QUESTION** + +Image generation crashed with CUDA out of memory error after successful sampling. + +### **SOLUTION** + +Try to run script with option `--free_gpu_mem` This will free memory before image decoding step. diff --git a/docs/other/CONTRIBUTORS.md b/docs/other/CONTRIBUTORS.md index 8f40419791..be4f3f407c 100644 --- a/docs/other/CONTRIBUTORS.md +++ b/docs/other/CONTRIBUTORS.md @@ -57,6 +57,7 @@ We thank them for all of their time and hard work. 
- [Kyle Schouviller](https://github.com/kyle0654) - [rabidcopy](https://github.com/rabidcopy) - [Dominic Letz](https://github.com/dominicletz) +- [Dmitry T.](https://github.com/ArDiouscuros) ## **Original CompVis Authors:** From ac1999929f457d5eec4e8537d34048145201bb2f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 27 Sep 2022 18:13:45 -0400 Subject: [PATCH 07/14] remove dangling debug statement --- ldm/generate.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldm/generate.py b/ldm/generate.py index e0468434ea..7f1953a80e 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -829,7 +829,6 @@ class Generate: return model def _load_img(self, path, width, height, fit=False): - print(f'DEBUG: path = {path}') assert os.path.exists(path), f'>> {path}: File not found' # with Image.open(path) as img: From af8383c77044fe861c8adecf2c88b9fda3d0e712 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Tue, 27 Sep 2022 09:15:32 +1300 Subject: [PATCH 08/14] Integrate New WebUI with dream.py --- backend/invoke_ai_web_server.py | 813 ++++++++++++++++++++++++++++++++ backend/modules/parameters.py | 2 +- ldm/dream/args.py | 17 + scripts/dream.py | 38 +- 4 files changed, 843 insertions(+), 27 deletions(-) create mode 100644 backend/invoke_ai_web_server.py diff --git a/backend/invoke_ai_web_server.py b/backend/invoke_ai_web_server.py new file mode 100644 index 0000000000..037baa40fa --- /dev/null +++ b/backend/invoke_ai_web_server.py @@ -0,0 +1,813 @@ +import eventlet +import glob +import os +import shutil + +from flask import Flask, redirect, send_from_directory +from flask_socketio import SocketIO +from PIL import Image +from uuid import uuid4 +from threading import Event + +from ldm.dream.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash +from ldm.dream.pngwriter import PngWriter, retrieve_metadata +from ldm.dream.conditioning import split_weighted_subprompts + +from backend.modules.parameters import parameters_to_command + +# Loading Arguments +opt = Args() +args = opt.parse_args() + + +class InvokeAIWebServer: + def __init__(self, generate, gfpgan, codeformer, esrgan) -> None: + self.host = args.host + self.port = args.port + + self.generate = generate + self.gfpgan = gfpgan + self.codeformer = codeformer + self.esrgan = esrgan + + self.canceled = Event() + + def run(self): + self.setup_app() + self.setup_flask() + + def setup_flask(self): + # Socket IO + logger = True if args.web_verbose else False + engineio_logger = True if args.web_verbose else False + max_http_buffer_size = 10000000 + + # CORS Allowed Setup + cors_allowed_origins = ['http://127.0.0.1:5173', 'http://localhost:5173'] + additional_allowed_origins = ( + opt.cors if opt.cors else [] + ) # additional CORS allowed origins + if self.host == '127.0.0.1': + cors_allowed_origins.extend( + [ + f'http://{self.host}:{self.port}', + f'http://localhost:{self.port}', + ] + ) + cors_allowed_origins = ( + cors_allowed_origins + additional_allowed_origins + ) + + self.app = Flask( + __name__, static_url_path='', static_folder='../frontend/dist/' + ) + + self.socketio = SocketIO( + self.app, + logger=logger, + engineio_logger=engineio_logger, + max_http_buffer_size=max_http_buffer_size, + cors_allowed_origins=cors_allowed_origins, + ping_interval=(50, 50), + ping_timeout=60, + ) + + # Outputs Route + self.app.config['OUTPUTS_FOLDER'] = f'../{args.outdir}' + + @self.app.route('/outputs/') + def outputs(filename): + return send_from_directory( + self.app.config['OUTPUTS_FOLDER'], 
filename + ) + + # Base Route + @self.app.route('/') + def serve(): + if args.web_develop: + return redirect('http://127.0.0.1:5173') + else: + return send_from_directory( + self.app.static_folder, 'index.html' + ) + + self.load_socketio_listeners(self.socketio) + + print('>> Started Invoke AI Web Server!') + if self.host == '0.0.0.0': + print( + f"Point your browser at http://localhost:{self.port} or use the host's DNS name or IP address." + ) + else: + print( + '>> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.' + ) + print(f'>> Point your browser at http://{self.host}:{self.port}') + + self.socketio.run(app=self.app, host=self.host, port=self.port) + + def setup_app(self): + # location for "finished" images + self.result_path = args.outdir + # temporary path for intermediates + self.intermediate_path = os.path.join( + self.result_path, 'intermediates/' + ) + # path for user-uploaded init images and masks + self.init_image_path = os.path.join(self.result_path, 'init-images/') + self.mask_image_path = os.path.join(self.result_path, 'mask-images/') + # txt log + self.log_path = os.path.join(self.result_path, 'dream_log.txt') + # make all output paths + [ + os.makedirs(path, exist_ok=True) + for path in [ + self.result_path, + self.intermediate_path, + self.init_image_path, + self.mask_image_path, + ] + ] + + def load_socketio_listeners(self, socketio): + @socketio.on('requestSystemConfig') + def handle_request_capabilities(): + print(f'>> System config requested') + config = self.get_system_config() + socketio.emit('systemConfig', config) + + @socketio.on('requestImages') + def handle_request_images(page=1, offset=0, last_mtime=None): + chunk_size = 50 + + if last_mtime: + print(f'>> Latest images requested') + else: + print( + f'>> Page {page} of images requested (page size {chunk_size} offset {offset})' + ) + + paths = glob.glob(os.path.join(self.result_path, '*.png')) + sorted_paths = sorted( + paths, key=lambda x: os.path.getmtime(x), reverse=True + ) + + if last_mtime: + image_paths = filter( + lambda x: os.path.getmtime(x) > last_mtime, sorted_paths + ) + else: + + image_paths = sorted_paths[ + slice( + chunk_size * (page - 1) + offset, + chunk_size * page + offset, + ) + ] + page = page + 1 + + image_array = [] + + for path in image_paths: + metadata = retrieve_metadata(path) + image_array.append( + { + 'url': path, + 'mtime': os.path.getmtime(path), + 'metadata': metadata['sd-metadata'], + } + ) + + socketio.emit( + 'galleryImages', + { + 'images': image_array, + 'nextPage': page, + 'offset': offset, + 'onlyNewImages': True if last_mtime else False, + }, + ) + + @socketio.on('generateImage') + def handle_generate_image_event( + generation_parameters, esrgan_parameters, gfpgan_parameters + ): + print( + f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}' + ) + self.generate_images( + generation_parameters, esrgan_parameters, gfpgan_parameters + ) + + @socketio.on('runESRGAN') + def handle_run_esrgan_event(original_image, esrgan_parameters): + print( + f'>> ESRGAN upscale requested for "{original_image["url"]}": {esrgan_parameters}' + ) + progress = { + 'currentStep': 1, + 'totalSteps': 1, + 'currentIteration': 1, + 'totalIterations': 1, + 'currentStatus': 'Preparing', + 'isProcessing': True, + 'currentStatusHasSteps': False, + } + + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = Image.open(original_image['url']) + + seed = ( + 
original_image['metadata']['seed'] + if 'seed' in original_image['metadata'] + else 'unknown_seed' + ) + + progress['currentStatus'] = 'Upscaling' + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = self.esrgan.process( + image=image, + upsampler_scale=esrgan_parameters['upscale'][0], + strength=esrgan_parameters['upscale'][1], + seed=seed, + ) + + progress['currentStatus'] = 'Saving image' + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + esrgan_parameters['seed'] = seed + metadata = self.parameters_to_post_processed_image_metadata( + parameters=esrgan_parameters, + original_image_path=original_image['url'], + type='esrgan', + ) + command = parameters_to_command(esrgan_parameters) + + path = self.save_image( + image, + command, + metadata, + self.result_path, + postprocessing='esrgan', + ) + + self.write_log_message( + f'[Upscaled] "{original_image["url"]}" > "{path}": {command}' + ) + + progress['currentStatus'] = 'Finished' + progress['currentStep'] = 0 + progress['totalSteps'] = 0 + progress['currentIteration'] = 0 + progress['totalIterations'] = 0 + progress['isProcessing'] = False + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + socketio.emit( + 'esrganResult', + { + 'url': os.path.relpath(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) + + @socketio.on('runGFPGAN') + def handle_run_gfpgan_event(original_image, gfpgan_parameters): + print( + f'>> GFPGAN face fix requested for "{original_image["url"]}": {gfpgan_parameters}' + ) + progress = { + 'currentStep': 1, + 'totalSteps': 1, + 'currentIteration': 1, + 'totalIterations': 1, + 'currentStatus': 'Preparing', + 'isProcessing': True, + 'currentStatusHasSteps': False, + } + + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = Image.open(original_image['url']) + + seed = ( + original_image['metadata']['seed'] + if 'seed' in original_image['metadata'] + else 'unknown_seed' + ) + + progress['currentStatus'] = 'Fixing faces' + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = self.gfpgan.process( + image=image, + strength=gfpgan_parameters['gfpgan_strength'], + seed=seed, + ) + + progress['currentStatus'] = 'Saving image' + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + gfpgan_parameters['seed'] = seed + metadata = self.parameters_to_post_processed_image_metadata( + parameters=gfpgan_parameters, + original_image_path=original_image['url'], + type='gfpgan', + ) + command = parameters_to_command(gfpgan_parameters) + + path = self.save_image( + image, + command, + metadata, + self.result_path, + postprocessing='gfpgan', + ) + + self.write_log_message( + f'[Fixed faces] "{original_image["url"]}" > "{path}": {command}' + ) + + progress['currentStatus'] = 'Finished' + progress['currentStep'] = 0 + progress['totalSteps'] = 0 + progress['currentIteration'] = 0 + progress['totalIterations'] = 0 + progress['isProcessing'] = False + socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + socketio.emit( + 'gfpganResult', + { + 'url': os.path.relpath(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) + + @socketio.on('cancel') + def handle_cancel(): + print(f'>> Cancel processing requested') + self.canceled.set() + socketio.emit('processingCanceled') + + # TODO: I think this needs a safety mechanism. 
+ @socketio.on('deleteImage') + def handle_delete_image(path, uuid): + print(f'>> Delete requested "{path}"') + from send2trash import send2trash + + send2trash(path) + socketio.emit('imageDeleted', {'url': path, 'uuid': uuid}) + + # TODO: I think this needs a safety mechanism. + @socketio.on('uploadInitialImage') + def handle_upload_initial_image(bytes, name): + print(f'>> Init image upload requested "{name}"') + uuid = uuid4().hex + split = os.path.splitext(name) + name = f'{split[0]}.{uuid}{split[1]}' + file_path = os.path.join(self.init_image_path, name) + os.makedirs(os.path.dirname(file_path), exist_ok=True) + newFile = open(file_path, 'wb') + newFile.write(bytes) + socketio.emit( + 'initialImageUploaded', {'url': file_path, 'uuid': ''} + ) + + # TODO: I think this needs a safety mechanism. + @socketio.on('uploadMaskImage') + def handle_upload_mask_image(bytes, name): + print(f'>> Mask image upload requested "{name}"') + uuid = uuid4().hex + split = os.path.splitext(name) + name = f'{split[0]}.{uuid}{split[1]}' + file_path = os.path.join(self.mask_image_path, name) + os.makedirs(os.path.dirname(file_path), exist_ok=True) + newFile = open(file_path, 'wb') + newFile.write(bytes) + socketio.emit('maskImageUploaded', {'url': file_path, 'uuid': ''}) + + # App Functions + def get_system_config(self): + return { + 'model': 'stable diffusion', + 'model_id': args.model, + 'model_hash': self.generate.model_hash, + 'app_id': APP_ID, + 'app_version': APP_VERSION, + } + + def generate_images( + self, generation_parameters, esrgan_parameters, gfpgan_parameters + ): + self.canceled.clear() + + step_index = 1 + prior_variations = ( + generation_parameters['with_variations'] + if 'with_variations' in generation_parameters + else [] + ) + """ + If a result image is used as an init image, and then deleted, we will want to be + able to use it as an init image in the future. Need to copy it. + + If the init/mask image doesn't exist in the init_image_path/mask_image_path, + make a unique filename for it and copy it there. 
+ """ + if 'init_img' in generation_parameters: + filename = os.path.basename(generation_parameters['init_img']) + if not os.path.exists( + os.path.join(self.init_image_path, filename) + ): + unique_filename = self.make_unique_init_image_filename( + filename + ) + new_path = os.path.join(self.init_image_path, unique_filename) + shutil.copy(generation_parameters['init_img'], new_path) + generation_parameters['init_img'] = new_path + if 'init_mask' in generation_parameters: + filename = os.path.basename(generation_parameters['init_mask']) + if not os.path.exists( + os.path.join(self.mask_image_path, filename) + ): + unique_filename = self.make_unique_init_image_filename( + filename + ) + new_path = os.path.join( + self.init_image_path, unique_filename + ) + shutil.copy(generation_parameters['init_img'], new_path) + generation_parameters['init_mask'] = new_path + + totalSteps = self.calculate_real_steps( + steps=generation_parameters['steps'], + strength=generation_parameters['strength'] + if 'strength' in generation_parameters + else None, + has_init_image='init_img' in generation_parameters, + ) + + progress = { + 'currentStep': 1, + 'totalSteps': totalSteps, + 'currentIteration': 1, + 'totalIterations': generation_parameters['iterations'], + 'currentStatus': 'Preparing', + 'isProcessing': True, + 'currentStatusHasSteps': False, + } + + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + def image_progress(sample, step): + if self.canceled.is_set(): + raise CanceledException + + nonlocal step_index + nonlocal generation_parameters + nonlocal progress + + progress['currentStep'] = step + 1 + progress['currentStatus'] = 'Generating' + progress['currentStatusHasSteps'] = True + + if ( + generation_parameters['progress_images'] + and step % 5 == 0 + and step < generation_parameters['steps'] - 1 + ): + image = self.generate.sample_to_image(sample) + metadata = self.parameters_to_generated_image_metadata(generation_parameters) + command = parameters_to_command(generation_parameters) + path = self.save_image(image, command, metadata, self.intermediate_path, step_index=step_index, postprocessing=False) + + step_index += 1 + self.socketio.emit( + 'intermediateResult', + { + 'url': os.path.relpath(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + def image_done(image, seed, first_seed): + nonlocal generation_parameters + nonlocal esrgan_parameters + nonlocal gfpgan_parameters + nonlocal progress + + step_index = 1 + nonlocal prior_variations + + progress['currentStatus'] = 'Generation complete' + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + all_parameters = generation_parameters + postprocessing = False + + if ( + 'variation_amount' in all_parameters + and all_parameters['variation_amount'] > 0 + ): + first_seed = first_seed or seed + this_variation = [[seed, all_parameters['variation_amount']]] + all_parameters['with_variations'] = ( + prior_variations + this_variation + ) + all_parameters['seed'] = first_seed + elif 'with_variations' in all_parameters: + all_parameters['seed'] = first_seed + else: + all_parameters['seed'] = seed + + if esrgan_parameters: + progress['currentStatus'] = 'Upscaling' + progress['currentStatusHasSteps'] = False + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = self.esrgan.process( + image=image, + upsampler_scale=esrgan_parameters['level'], + strength=esrgan_parameters['strength'], + seed=seed, + ) + 
+ postprocessing = True + all_parameters['upscale'] = [ + esrgan_parameters['level'], + esrgan_parameters['strength'], + ] + + if gfpgan_parameters: + progress['currentStatus'] = 'Fixing faces' + progress['currentStatusHasSteps'] = False + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + image = self.gfpgan.process( + image=image, + strength=gfpgan_parameters['strength'], + seed=seed, + ) + postprocessing = True + all_parameters['gfpgan_strength'] = gfpgan_parameters[ + 'strength' + ] + + progress['currentStatus'] = 'Saving image' + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + metadata = self.parameters_to_generated_image_metadata( + all_parameters + ) + command = parameters_to_command(all_parameters) + + path = self.save_image( + image, + command, + metadata, + self.result_path, + postprocessing=postprocessing, + ) + + print(f'>> Image generated: "{path}"') + self.write_log_message(f'[Generated] "{path}": {command}') + + if progress['totalIterations'] > progress['currentIteration']: + progress['currentStep'] = 1 + progress['currentIteration'] += 1 + progress['currentStatus'] = 'Iteration finished' + progress['currentStatusHasSteps'] = False + else: + progress['currentStep'] = 0 + progress['totalSteps'] = 0 + progress['currentIteration'] = 0 + progress['totalIterations'] = 0 + progress['currentStatus'] = 'Finished' + progress['isProcessing'] = False + + self.socketio.emit('progressUpdate', progress) + eventlet.sleep(0) + + self.socketio.emit( + 'generationResult', + { + 'url': os.path.relpath(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) + eventlet.sleep(0) + + try: + self.generate.prompt2image( + **generation_parameters, + step_callback=image_progress, + image_callback=image_done, + ) + + except KeyboardInterrupt: + raise + except CanceledException: + pass + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + import traceback + + traceback.print_exc() + print('\n') + + def parameters_to_generated_image_metadata(self, parameters): + # top-level metadata minus `image` or `images` + metadata = self.get_system_config() + # remove any image keys not mentioned in RFC #266 + rfc266_img_fields = [ + 'type', + 'postprocessing', + 'sampler', + 'prompt', + 'seed', + 'variations', + 'steps', + 'cfg_scale', + 'step_number', + 'width', + 'height', + 'extra', + 'seamless', + ] + + rfc_dict = {} + + for item in parameters.items(): + key, value = item + if key in rfc266_img_fields: + rfc_dict[key] = value + + postprocessing = [] + + # 'postprocessing' is either null or an + if 'gfpgan_strength' in parameters: + + postprocessing.append( + { + 'type': 'gfpgan', + 'strength': float(parameters['gfpgan_strength']), + } + ) + + if 'upscale' in parameters: + postprocessing.append( + { + 'type': 'esrgan', + 'scale': int(parameters['upscale'][0]), + 'strength': float(parameters['upscale'][1]), + } + ) + + rfc_dict['postprocessing'] = ( + postprocessing if len(postprocessing) > 0 else None + ) + + # semantic drift + rfc_dict['sampler'] = parameters['sampler_name'] + + # display weighted subprompts (liable to change) + subprompts = split_weighted_subprompts(parameters['prompt']) + subprompts = [{'prompt': x[0], 'weight': x[1]} for x in subprompts] + rfc_dict['prompt'] = subprompts + + # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs + variations = [] + + if 'with_variations' in parameters: + variations = [ + {'seed': x[0], 'weight': 
x[1]} + for x in parameters['with_variations'] + ] + + rfc_dict['variations'] = variations + + if 'init_img' in parameters: + rfc_dict['type'] = 'img2img' + rfc_dict['strength'] = parameters['strength'] + rfc_dict['fit'] = parameters['fit'] # TODO: Noncompliant + rfc_dict['orig_hash'] = calculate_init_img_hash( + parameters['init_img'] + ) + rfc_dict['init_image_path'] = parameters[ + 'init_img' + ] # TODO: Noncompliant + rfc_dict[ + 'sampler' + ] = 'ddim' # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS + if 'init_mask' in parameters: + rfc_dict['mask_hash'] = calculate_init_img_hash( + parameters['init_mask'] + ) # TODO: Noncompliant + rfc_dict['mask_image_path'] = parameters[ + 'init_mask' + ] # TODO: Noncompliant + else: + rfc_dict['type'] = 'txt2img' + + metadata['image'] = rfc_dict + + return metadata + + def parameters_to_post_processed_image_metadata( + self, parameters, original_image_path, type + ): + # top-level metadata minus `image` or `images` + metadata = self.get_system_config() + + orig_hash = calculate_init_img_hash(original_image_path) + + image = {'orig_path': original_image_path, 'orig_hash': orig_hash} + + if type == 'esrgan': + image['type'] = 'esrgan' + image['scale'] = parameters['upscale'][0] + image['strength'] = parameters['upscale'][1] + elif type == 'gfpgan': + image['type'] = 'gfpgan' + image['strength'] = parameters['gfpgan_strength'] + else: + raise TypeError(f'Invalid type: {type}') + + metadata['image'] = image + return metadata + + def save_image( + self, + image, + command, + metadata, + output_dir, + step_index=None, + postprocessing=False, + ): + pngwriter = PngWriter(output_dir) + prefix = pngwriter.unique_prefix() + + seed = 'unknown_seed' + + if 'image' in metadata: + if 'seed' in metadata['image']: + seed = metadata['image']['seed'] + + filename = f'{prefix}.{seed}' + + if step_index: + filename += f'.{step_index}' + if postprocessing: + filename += f'.postprocessed' + + filename += '.png' + + path = pngwriter.save_image_and_prompt_to_png( + image=image, dream_prompt=command, metadata=metadata, name=filename + ) + + return path + + def make_unique_init_image_filename(self, name): + uuid = uuid4().hex + split = os.path.splitext(name) + name = f'{split[0]}.{uuid}{split[1]}' + return name + + def calculate_real_steps(self, steps, strength, has_init_image): + import math + return math.floor(strength * steps) if has_init_image else steps + + def write_log_message(self, message): + """Logs the filename and parameters used to generate or process that image to log file""" + message = f'{message}\n' + with open(self.log_path, 'a', encoding='utf-8') as file: + file.writelines(message) + + +class CanceledException(Exception): + pass diff --git a/backend/modules/parameters.py b/backend/modules/parameters.py index ec0cfe8272..d15167e792 100644 --- a/backend/modules/parameters.py +++ b/backend/modules/parameters.py @@ -1,4 +1,4 @@ -from modules.parse_seed_weights import parse_seed_weights +from backend.modules.parse_seed_weights import parse_seed_weights import argparse SAMPLER_CHOICES = [ diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 62ad9ccf01..6c8ec96e42 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -421,6 +421,23 @@ class Args(object): action='store_true', help='Start in web server mode.', ) + web_server_group.add_argument( + '--web_develop', + dest='web_develop', + action='store_true', + help='Start in web server development mode.', + ) + web_server_group.add_argument( + "--web_verbose", + action="store_true", + help="Enables 
verbose logging", + ) + web_server_group.add_argument( + "--cors", + nargs="*", + type=str, + help="Additional allowed origins, comma-separated", + ) web_server_group.add_argument( '--host', type=str, diff --git a/scripts/dream.py b/scripts/dream.py index cac8c2aee4..c9eb6a0497 100755 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -12,11 +12,12 @@ sys.path.append('.') # corrects a weird problem on Macs import ldm.dream.readline from ldm.dream.args import Args, metadata_dumps, metadata_from_png from ldm.dream.pngwriter import PngWriter -from ldm.dream.server import DreamServer, ThreadingDreamServer from ldm.dream.image_util import make_grid from ldm.dream.log import write_log from omegaconf import OmegaConf +from backend.invoke_ai_web_server import InvokeAIWebServer + # Placeholder to be replaced with proper class that tracks the # outputs and associates with the prompt that generated them. # Just want to get the formatting look right for now. @@ -111,16 +112,16 @@ def main(): #set additional option gen.free_gpu_mem = opt.free_gpu_mem + # web server loops forever + if opt.web: + invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan) + sys.exit(0) + if not infile: print( "\n* Initialization done! Awaiting your command (-h for help, 'q' to quit)" ) - # web server loops forever - if opt.web: - dream_server_loop(gen, opt.host, opt.port, opt.outdir, gfpgan) - sys.exit(0) - main_loop(gen, opt, infile) # TODO: main_loop() has gotten busy. Needs to be refactored. @@ -414,35 +415,20 @@ def get_next_command(infile=None) -> str: # command string print(f'#{command}') return command -def dream_server_loop(gen, host, port, outdir, gfpgan): +def invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan): print('\n* --web was specified, starting web server...') # Change working directory to the stable-diffusion directory os.chdir( os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) ) - - # Start server - DreamServer.model = gen # misnomer in DreamServer - this is not the model you are looking for - DreamServer.outdir = outdir - DreamServer.gfpgan_model_exists = False - if gfpgan is not None: - DreamServer.gfpgan_model_exists = gfpgan.gfpgan_model_exists - - dream_server = ThreadingDreamServer((host, port)) - print(">> Started Stable Diffusion dream server!") - if host == '0.0.0.0': - print( - f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.") - else: - print(">> Default host address now 127.0.0.1 (localhost). 
Use --host 0.0.0.0 to bind any address.") - print(f">> Point your browser at http://{host}:{port}") + + invoke_ai_web_server = InvokeAIWebServer(generate=gen, gfpgan=gfpgan, codeformer=codeformer, esrgan=esrgan) try: - dream_server.serve_forever() + invoke_ai_web_server.run() except KeyboardInterrupt: pass - - dream_server.server_close() + def write_log_message(results, log_path): """logs the name of the output image, prompt, and prompt args to the terminal and log file""" From 984575b57908aea32416fb57b3756bce6ebe03a9 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Wed, 28 Sep 2022 16:00:09 +1300 Subject: [PATCH 09/14] Fix WebUI Integration Bugs Co-Authored-By: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- backend/invoke_ai_web_server.py | 163 ++++++++++++++++++++++---------- 1 file changed, 113 insertions(+), 50 deletions(-) diff --git a/backend/invoke_ai_web_server.py b/backend/invoke_ai_web_server.py index 037baa40fa..b130d47287 100644 --- a/backend/invoke_ai_web_server.py +++ b/backend/invoke_ai_web_server.py @@ -72,15 +72,17 @@ class InvokeAIWebServer: ping_timeout=60, ) - # Outputs Route - self.app.config['OUTPUTS_FOLDER'] = f'../{args.outdir}' - @self.app.route('/outputs/') - def outputs(filename): + # Outputs Route + self.app.config['OUTPUTS_FOLDER'] = os.path.abspath(args.outdir) + + @self.app.route('/outputs/') + def outputs(file_path): return send_from_directory( - self.app.config['OUTPUTS_FOLDER'], filename + self.app.config['OUTPUTS_FOLDER'], file_path ) + # Base Route @self.app.route('/') def serve(): @@ -107,6 +109,10 @@ class InvokeAIWebServer: self.socketio.run(app=self.app, host=self.host, port=self.port) def setup_app(self): + self.result_url = 'outputs/' + self.init_image_url = 'outputs/init-images/' + self.mask_image_url = 'outputs/mask-images/' + self.intermediate_url = 'outputs/intermediates/' # location for "finished" images self.result_path = args.outdir # temporary path for intermediates @@ -172,7 +178,7 @@ class InvokeAIWebServer: metadata = retrieve_metadata(path) image_array.append( { - 'url': path, + 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata['sd-metadata'], } @@ -217,7 +223,10 @@ class InvokeAIWebServer: socketio.emit('progressUpdate', progress) eventlet.sleep(0) - image = Image.open(original_image['url']) + original_image_path = self.get_image_path_from_url(original_image['url']) + # os.path.join(self.result_path, os.path.basename(original_image['url'])) + + image = Image.open(original_image_path) seed = ( original_image['metadata']['seed'] @@ -243,7 +252,7 @@ class InvokeAIWebServer: esrgan_parameters['seed'] = seed metadata = self.parameters_to_post_processed_image_metadata( parameters=esrgan_parameters, - original_image_path=original_image['url'], + original_image_path=original_image_path, type='esrgan', ) command = parameters_to_command(esrgan_parameters) @@ -257,7 +266,7 @@ class InvokeAIWebServer: ) self.write_log_message( - f'[Upscaled] "{original_image["url"]}" > "{path}": {command}' + f'[Upscaled] "{original_image_path}" > "{path}": {command}' ) progress['currentStatus'] = 'Finished' @@ -272,7 +281,7 @@ class InvokeAIWebServer: socketio.emit( 'esrganResult', { - 'url': os.path.relpath(path), + 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata, }, @@ -296,7 +305,9 @@ class InvokeAIWebServer: socketio.emit('progressUpdate', progress) eventlet.sleep(0) - image = 
Image.open(original_image['url']) + original_image_path = self.get_image_path_from_url(original_image['url']) + + image = Image.open(original_image_path) seed = ( original_image['metadata']['seed'] @@ -321,7 +332,7 @@ class InvokeAIWebServer: gfpgan_parameters['seed'] = seed metadata = self.parameters_to_post_processed_image_metadata( parameters=gfpgan_parameters, - original_image_path=original_image['url'], + original_image_path=original_image_path, type='gfpgan', ) command = parameters_to_command(gfpgan_parameters) @@ -335,7 +346,7 @@ class InvokeAIWebServer: ) self.write_log_message( - f'[Fixed faces] "{original_image["url"]}" > "{path}": {command}' + f'[Fixed faces] "{original_image_path}" > "{path}": {command}' ) progress['currentStatus'] = 'Finished' @@ -350,7 +361,7 @@ class InvokeAIWebServer: socketio.emit( 'gfpganResult', { - 'url': os.path.relpath(path), + 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata, }, @@ -368,6 +379,7 @@ class InvokeAIWebServer: print(f'>> Delete requested "{path}"') from send2trash import send2trash + path = self.get_image_path_from_url(path) send2trash(path) socketio.emit('imageDeleted', {'url': path, 'uuid': uuid}) @@ -382,8 +394,9 @@ class InvokeAIWebServer: os.makedirs(os.path.dirname(file_path), exist_ok=True) newFile = open(file_path, 'wb') newFile.write(bytes) + socketio.emit( - 'initialImageUploaded', {'url': file_path, 'uuid': ''} + 'initialImageUploaded', {'url': self.get_url_from_image_path(file_path), 'uuid': ''} ) # TODO: I think this needs a safety mechanism. @@ -397,7 +410,8 @@ class InvokeAIWebServer: os.makedirs(os.path.dirname(file_path), exist_ok=True) newFile = open(file_path, 'wb') newFile.write(bytes) - socketio.emit('maskImageUploaded', {'url': file_path, 'uuid': ''}) + + socketio.emit('maskImageUploaded', {'url': self.get_url_from_image_path(file_path), 'uuid': ''}) # App Functions def get_system_config(self): @@ -420,37 +434,58 @@ class InvokeAIWebServer: if 'with_variations' in generation_parameters else [] ) + """ + TODO: RE-IMPLEMENT THE COMMENTED-OUT CODE If a result image is used as an init image, and then deleted, we will want to be able to use it as an init image in the future. Need to copy it. If the init/mask image doesn't exist in the init_image_path/mask_image_path, make a unique filename for it and copy it there. 
""" + # if 'init_img' in generation_parameters: + # filename = os.path.basename(generation_parameters['init_img']) + # abs_init_image_path = os.path.join(self.init_image_path, filename) + # if not os.path.exists( + # abs_init_image_path + # ): + # unique_filename = self.make_unique_init_image_filename( + # filename + # ) + # new_path = os.path.join(self.init_image_path, unique_filename) + # shutil.copy(abs_init_image_path, new_path) + # generation_parameters['init_img'] = os.path.abspath(new_path) + # else: + # generation_parameters['init_img'] = os.path.abspath(os.path.join(self.init_image_path, filename)) + + # if 'init_mask' in generation_parameters: + # filename = os.path.basename(generation_parameters['init_mask']) + # if not os.path.exists( + # os.path.join(self.mask_image_path, filename) + # ): + # unique_filename = self.make_unique_init_image_filename( + # filename + # ) + # new_path = os.path.join( + # self.init_image_path, unique_filename + # ) + # shutil.copy(generation_parameters['init_img'], new_path) + # generation_parameters['init_mask'] = os.path.abspath(new_path) + # else: + # generation_parameters['init_mas'] = os.path.abspath(os.path.join(self.mask_image_path, filename)) + + + # We need to give absolute paths to the generator, stash the URLs for later + init_img_url = None; + mask_img_url = None; + if 'init_img' in generation_parameters: - filename = os.path.basename(generation_parameters['init_img']) - if not os.path.exists( - os.path.join(self.init_image_path, filename) - ): - unique_filename = self.make_unique_init_image_filename( - filename - ) - new_path = os.path.join(self.init_image_path, unique_filename) - shutil.copy(generation_parameters['init_img'], new_path) - generation_parameters['init_img'] = new_path - if 'init_mask' in generation_parameters: - filename = os.path.basename(generation_parameters['init_mask']) - if not os.path.exists( - os.path.join(self.mask_image_path, filename) - ): - unique_filename = self.make_unique_init_image_filename( - filename - ) - new_path = os.path.join( - self.init_image_path, unique_filename - ) - shutil.copy(generation_parameters['init_img'], new_path) - generation_parameters['init_mask'] = new_path + init_img_url = generation_parameters['init_img'] + generation_parameters['init_img'] = self.get_image_path_from_url(generation_parameters['init_img']) + + if 'init_mask' in generation_parameters: + mask_img_url = generation_parameters['init_mask'] + generation_parameters['init_mask'] = self.get_image_path_from_url(generation_parameters['init_mask']) totalSteps = self.calculate_real_steps( steps=generation_parameters['steps'], @@ -493,13 +528,14 @@ class InvokeAIWebServer: image = self.generate.sample_to_image(sample) metadata = self.parameters_to_generated_image_metadata(generation_parameters) command = parameters_to_command(generation_parameters) + path = self.save_image(image, command, metadata, self.intermediate_path, step_index=step_index, postprocessing=False) step_index += 1 self.socketio.emit( 'intermediateResult', { - 'url': os.path.relpath(path), + 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata, }, @@ -577,9 +613,17 @@ class InvokeAIWebServer: self.socketio.emit('progressUpdate', progress) eventlet.sleep(0) + # restore the stashed URLS and discard the paths, we are about to send the result to client + if 'init_img' in all_parameters: + all_parameters['init_img'] = init_img_url + + if 'init_mask' in all_parameters: + all_parameters['init_mask'] = mask_img_url + metadata = 
self.parameters_to_generated_image_metadata( all_parameters ) + command = parameters_to_command(all_parameters) path = self.save_image( @@ -612,7 +656,7 @@ class InvokeAIWebServer: self.socketio.emit( 'generationResult', { - 'url': os.path.relpath(path), + 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata, }, @@ -713,9 +757,7 @@ class InvokeAIWebServer: rfc_dict['type'] = 'img2img' rfc_dict['strength'] = parameters['strength'] rfc_dict['fit'] = parameters['fit'] # TODO: Noncompliant - rfc_dict['orig_hash'] = calculate_init_img_hash( - parameters['init_img'] - ) + rfc_dict['orig_hash'] = calculate_init_img_hash(self.get_image_path_from_url(parameters['init_img'])) rfc_dict['init_image_path'] = parameters[ 'init_img' ] # TODO: Noncompliant @@ -723,9 +765,7 @@ class InvokeAIWebServer: 'sampler' ] = 'ddim' # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS if 'init_mask' in parameters: - rfc_dict['mask_hash'] = calculate_init_img_hash( - parameters['init_mask'] - ) # TODO: Noncompliant + rfc_dict['mask_hash'] = calculate_init_img_hash(self.get_image_path_from_url(parameters['init_mask'])) # TODO: Noncompliant rfc_dict['mask_image_path'] = parameters[ 'init_mask' ] # TODO: Noncompliant @@ -742,7 +782,7 @@ class InvokeAIWebServer: # top-level metadata minus `image` or `images` metadata = self.get_system_config() - orig_hash = calculate_init_img_hash(original_image_path) + orig_hash = calculate_init_img_hash(self.get_image_path_from_url(original_image_path)) image = {'orig_path': original_image_path, 'orig_hash': orig_hash} @@ -790,7 +830,7 @@ class InvokeAIWebServer: image=image, dream_prompt=command, metadata=metadata, name=filename ) - return path + return os.path.abspath(path) def make_unique_init_image_filename(self, name): uuid = uuid4().hex @@ -808,6 +848,29 @@ class InvokeAIWebServer: with open(self.log_path, 'a', encoding='utf-8') as file: file.writelines(message) + def get_image_path_from_url(self, url): + """Given a url to an image used by the client, returns the absolute file path to that image""" + if 'init-images' in url: + return os.path.abspath(os.path.join(self.init_image_path, os.path.basename(url))) + elif 'mask-images' in url: + return os.path.abspath(os.path.join(self.mask_image_path, os.path.basename(url))) + elif 'intermediates' in url: + return os.path.abspath(os.path.join(self.intermediate_path, os.path.basename(url))) + else: + return os.path.abspath(os.path.join(self.result_path, os.path.basename(url))) + + def get_url_from_image_path(self, path): + """Given an absolute file path to an image, returns the URL that the client can use to load the image""" + if 'init-images' in path: + return os.path.join(self.init_image_url, os.path.basename(path)) + elif 'mask-images' in path: + return os.path.join(self.mask_image_url, os.path.basename(path)) + elif 'intermediates' in path: + return os.path.join(self.intermediate_url, os.path.basename(path)) + else: + return os.path.join(self.result_url, os.path.basename(path)) + + class CanceledException(Exception): - pass + pass \ No newline at end of file From 800f9615c2fea5da3ffea5a844f9e6c2723c1fc9 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 27 Sep 2022 14:27:55 -0400 Subject: [PATCH 10/14] implement history viewing & replaying in CLI - Enhance tab completion functionality - Each of the switches that read a filepath (e.g. --init_img) will trigger file path completion. The -S switch will display a list of recently-used seeds. 
- Added new !fetch command to retrieve the metadata from a previously-generated image and populate the readline linebuffer with the appropriate editable command to regenerate. - Added new !history command to display previous commands and reload them for modification. - The !fetch and !fix commands both autocomplete *and* search automatically through the current outdir for files. - The completer maintains a list of recently used seeds and will try to autocomplete them. --- ldm/dream/args.py | 16 ++- ldm/dream/readline.py | 278 +++++++++++++++++++++++++++++++----------- scripts/dream.py | 194 +++++++++++++++++++---------- 3 files changed, 352 insertions(+), 136 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 62ad9ccf01..239921de2b 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -88,6 +88,7 @@ import hashlib import os import copy import base64 +import functools import ldm.dream.pngwriter from ldm.dream.conditioning import split_weighted_subprompts @@ -220,9 +221,15 @@ class Args(object): # outpainting parameters if a['out_direction']: switches.append(f'-D {" ".join([str(u) for u in a["out_direction"]])}') + # LS: slight semantic drift which needs addressing in the future: + # 1. Variations come out of the stored metadata as a packed string with the keyword "variations" + # 2. However, they come out of the CLI (and probably web) with the keyword "with_variations" and + # in broken-out form. Variation (1) should be changed to comply with (2) if a['with_variations']: - formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in (a["with_variations"])) - switches.append(f'-V {formatted_variations}') + formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in (a["variations"])) + switches.append(f'-V {a["formatted_variations"]}') + if 'variations' in a: + switches.append(f'-V {a["variations"]}') return ' '.join(switches) def __getattribute__(self,name): @@ -732,6 +739,7 @@ def metadata_dumps(opt, return metadata +@functools.lru_cache(maxsize=50) def metadata_from_png(png_file_path): ''' Given the path to a PNG file created by dream.py, retrieves @@ -741,6 +749,10 @@ def metadata_from_png(png_file_path): opts = metadata_loads(meta) return opts[0] +def dream_cmd_from_png(png_file_path): + opt = metadata_from_png(png_file_path) + return opt.dream_prompt_str() + def metadata_loads(metadata): ''' Takes the dictionary corresponding to RFC266 (https://github.com/lstein/stable-diffusion/issues/266) diff --git a/ldm/dream/readline.py b/ldm/dream/readline.py index 42f4a3ac01..6101b1cebd 100644 --- a/ldm/dream/readline.py +++ b/ldm/dream/readline.py @@ -1,38 +1,92 @@ """ Readline helper functions for dream.py (linux and mac only). +You may import the global singleton `completer` to get access to the +completer object itself. 
This is useful when you want to autocomplete +seeds: + + from ldm.dream.readline import completer + completer.add_seed(18247566) + completer.add_seed(9281839) """ import os import re import atexit +completer = None + # ---------------readline utilities--------------------- try: import readline - readline_available = True except: readline_available = False +IMG_EXTENSIONS = ('.png','.jpg','.jpeg') +COMMANDS = ( + '--steps','-s', + '--seed','-S', + '--iterations','-n', + '--width','-W','--height','-H', + '--cfg_scale','-C', + '--grid','-g', + '--individual','-i', + '--init_img','-I', + '--init_mask','-M', + '--init_color', + '--strength','-f', + '--variants','-v', + '--outdir','-o', + '--sampler','-A','-m', + '--embedding_path', + '--device', + '--grid','-g', + '--gfpgan_strength','-G', + '--upscale','-U', + '-save_orig','--save_original', + '--skip_normalize','-x', + '--log_tokenization','-t', + '!fix','!fetch', + ) +IMG_PATH_COMMANDS = ( + '--init_img[=\s]','-I', + '--init_mask[=\s]','-M', + '--init_color[=\s]', + '--embedding_path[=\s]', + '--outdir[=\s]' + ) +IMG_FILE_COMMANDS=( + '!fix', + '!fetch', + ) +path_regexp = '('+'|'.join(IMG_PATH_COMMANDS+IMG_FILE_COMMANDS) + ')\s*\S*$' class Completer: def __init__(self, options): - self.options = sorted(options) + self.options = sorted(options) + self.seeds = set() + self.matches = list() + self.default_dir = None + self.linebuffer = None return def complete(self, text, state): + ''' + Completes dream command line. + BUG: it doesn't correctly complete files that have spaces in the name. + ''' buffer = readline.get_line_buffer() - if text.startswith(('-I', '--init_img','-M','--init_mask', - '--init_color')): - return self._path_completions(text, state, ('.png','.jpg','.jpeg')) - - if buffer.strip().endswith('pp') or text.startswith(('.', '/')): - return self._path_completions(text, state, ('.png','.jpg','.jpeg')) - - response = None if state == 0: + if re.search(path_regexp,buffer): + do_shortcut = re.search('^'+'|'.join(IMG_FILE_COMMANDS),buffer) + self.matches = self._path_completions(text, state, IMG_EXTENSIONS,shortcut_ok=do_shortcut) + + # looking for a seed + elif re.search('(-S\s*|--seed[=\s])\d*$',buffer): + self.matches= self._seed_completions(text,state) + # This is the first time for this text, so build a match list. - if text: + elif text: self.matches = [ s for s in self.options if s and s.startswith(text) ] @@ -47,81 +101,158 @@ class Completer: response = None return response - def _path_completions(self, text, state, extensions): - # get the path so far - # TODO: replace this mess with a regular expression match - if text.startswith('-I'): - path = text.replace('-I', '', 1).lstrip() - elif text.startswith('--init_img='): - path = text.replace('--init_img=', '', 1).lstrip() - elif text.startswith('--init_mask='): - path = text.replace('--init_mask=', '', 1).lstrip() - elif text.startswith('-M'): - path = text.replace('-M', '', 1).lstrip() - elif text.startswith('--init_color='): - path = text.replace('--init_color=', '', 1).lstrip() + def add_to_history(self,line): + ''' + This is a no-op; readline handles this automatically. But we provide it + for DummyReadline compatibility. + ''' + pass + + def add_seed(self, seed): + ''' + Add a seed to the autocomplete list for display when -S is autocompleted. 
+ ''' + if seed is not None: + self.seeds.add(str(seed)) + + def set_default_dir(self, path): + self.default_dir=path + + def get_line(self,index): + try: + line = self.get_history_item(index) + except IndexError: + return None + return line + + def get_current_history_length(self): + return readline.get_current_history_length() + + def get_history_item(self,index): + return readline.get_history_item(index) + + def show_history(self): + ''' + Print the session history using the pydoc pager + ''' + import pydoc + lines = list() + h_len = self.get_current_history_length() + if h_len < 1: + print('') + return + + for i in range(0,h_len): + lines.append(f'[{i+1}] {self.get_history_item(i+1)}') + pydoc.pager('\n'.join(lines)) + + def set_line(self,line)->None: + self.linebuffer = line + readline.redisplay() + + def _seed_completions(self, text, state): + m = re.search('(-S\s?|--seed[=\s]?)(\d*)',text) + if m: + switch = m.groups()[0] + partial = m.groups()[1] else: - path = text + switch = '' + partial = text matches = list() + for s in self.seeds: + if s.startswith(partial): + matches.append(switch+s) + matches.sort() + return matches - path = os.path.expanduser(path) - if len(path) == 0: - matches.append(text + './') + def _pre_input_hook(self): + if self.linebuffer: + readline.insert_text(self.linebuffer) + readline.redisplay() + self.linebuffer = None + + def _path_completions(self, text, state, extensions, shortcut_ok=False): + # separate the switch from the partial path + match = re.search('^(-\w|--\w+=?)(.*)',text) + if match is None: + switch = None + partial_path = text else: + switch,partial_path = match.groups() + partial_path = partial_path.lstrip() + + matches = list() + path = os.path.expanduser(partial_path) + + if os.path.isdir(path): + dir = path + elif os.path.dirname(path) != '': dir = os.path.dirname(path) - dir_list = os.listdir(dir) - for n in dir_list: - if n.startswith('.') and len(n) > 1: - continue - full_path = os.path.join(dir, n) - if full_path.startswith(path): - if os.path.isdir(full_path): - matches.append( - os.path.join(os.path.dirname(text), n) + '/' - ) - elif n.endswith(extensions): - matches.append(os.path.join(os.path.dirname(text), n)) + else: + dir = '' + path= os.path.join(dir,path) - try: - response = matches[state] - except IndexError: - response = None - return response + dir_list = os.listdir(dir or '.') + if shortcut_ok and os.path.exists(self.default_dir) and dir=='': + dir_list += os.listdir(self.default_dir) + for node in dir_list: + if node.startswith('.') and len(node) > 1: + continue + full_path = os.path.join(dir, node) + + if not (node.endswith(extensions) or os.path.isdir(full_path)): + continue + + if not full_path.startswith(path): + continue + + if switch is None: + match_path = os.path.join(dir,node) + matches.append(match_path+'/' if os.path.isdir(full_path) else match_path) + elif os.path.isdir(full_path): + matches.append( + switch+os.path.join(os.path.dirname(full_path), node) + '/' + ) + elif node.endswith(extensions): + matches.append( + switch+os.path.join(os.path.dirname(full_path), node) + ) + return matches + +class DummyCompleter(Completer): + def __init__(self,options): + super().__init__(options) + self.history = list() + + def add_to_history(self,line): + self.history.append(line) + + def get_current_history_length(self): + return len(self.history) + + def get_history_item(self,index): + return self.history[index-1] + + def set_line(self,line): + print(f'# {line}') if readline_available: + completer = Completer(COMMANDS) 
+ readline.set_completer( - Completer( - [ - '--steps','-s', - '--seed','-S', - '--iterations','-n', - '--width','-W','--height','-H', - '--cfg_scale','-C', - '--grid','-g', - '--individual','-i', - '--init_img','-I', - '--init_mask','-M', - '--init_color', - '--strength','-f', - '--variants','-v', - '--outdir','-o', - '--sampler','-A','-m', - '--embedding_path', - '--device', - '--grid','-g', - '--gfpgan_strength','-G', - '--upscale','-U', - '-save_orig','--save_original', - '--skip_normalize','-x', - '--log_tokenization','t', - ] - ).complete + completer.complete ) + readline.set_pre_input_hook(completer._pre_input_hook) readline.set_completer_delims(' ') readline.parse_and_bind('tab: complete') - + readline.parse_and_bind('set print-completions-horizontally off') + readline.parse_and_bind('set page-completions on') + readline.parse_and_bind('set skip-completed-text on') + readline.parse_and_bind('set bell-style visible') + readline.parse_and_bind('set show-all-if-ambiguous on') + histfile = os.path.join(os.path.expanduser('~'), '.dream_history') try: readline.read_history_file(histfile) @@ -129,3 +260,6 @@ if readline_available: except FileNotFoundError: pass atexit.register(readline.write_history_file, histfile) + +else: + completer = DummyCompleter(COMMANDS) diff --git a/scripts/dream.py b/scripts/dream.py index cac8c2aee4..246418e60b 100755 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -9,8 +9,8 @@ import copy import warnings import time sys.path.append('.') # corrects a weird problem on Macs -import ldm.dream.readline -from ldm.dream.args import Args, metadata_dumps, metadata_from_png +from ldm.dream.readline import completer +from ldm.dream.args import Args, metadata_dumps, metadata_from_png, dream_cmd_from_png from ldm.dream.pngwriter import PngWriter from ldm.dream.server import DreamServer, ThreadingDreamServer from ldm.dream.image_util import make_grid @@ -141,7 +141,10 @@ def main_loop(gen, opt, infile): while not done: operation = 'generate' # default operation, alternative is 'postprocess' - + + if completer: + completer.set_default_dir(opt.outdir) + try: command = get_next_command(infile) except EOFError: @@ -159,16 +162,28 @@ def main_loop(gen, opt, infile): done = True break - if command.startswith( - '!dream' - ): # in case a stored prompt still contains the !dream command + if command.startswith('!dream'): # in case a stored prompt still contains the !dream command command = command.replace('!dream ','',1) - if command.startswith( - '!fix' - ): + if command.startswith('!fix'): command = command.replace('!fix ','',1) operation = 'postprocess' + + if command.startswith('!fetch'): + file_path = command.replace('!fetch ','',1) + retrieve_dream_command(opt,file_path) + continue + + if command == '!history': + completer.show_history() + continue + + match = re.match('^!(\d+)',command) + if match: + command_no = match.groups()[0] + command = completer.get_line(int(command_no)) + completer.set_line(command) + continue if opt.parse_cmd(command) is None: continue @@ -219,37 +234,15 @@ def main_loop(gen, opt, infile): opt.strength = 0.75 if opt.out_direction is None else 0.83 if opt.with_variations is not None: - # shotgun parsing, woo - parts = [] - broken = False # python doesn't have labeled loops... 
- for part in opt.with_variations.split(','): - seed_and_weight = part.split(':') - if len(seed_and_weight) != 2: - print(f'could not parse with_variation part "{part}"') - broken = True - break - try: - seed = int(seed_and_weight[0]) - weight = float(seed_and_weight[1]) - except ValueError: - print(f'could not parse with_variation part "{part}"') - broken = True - break - parts.append([seed, weight]) - if broken: - continue - if len(parts) > 0: - opt.with_variations = parts - else: - opt.with_variations = None + opt.with_variations = split_variations(opt.with_variations) if opt.prompt_as_dir: # sanitize the prompt to a valid folder name subdir = path_filter.sub('_', opt.prompt)[:name_max].rstrip(' .') # truncate path to maximum allowed length - # 27 is the length of '######.##########.##.png', plus two separators and a NUL - subdir = subdir[:(path_max - 27 - len(os.path.abspath(opt.outdir)))] + # 39 is the length of '######.##########.##########-##.png', plus two separators and a NUL + subdir = subdir[:(path_max - 39 - len(os.path.abspath(opt.outdir)))] current_outdir = os.path.join(opt.outdir, subdir) print('Writing files to directory: "' + current_outdir + '"') @@ -280,23 +273,17 @@ def main_loop(gen, opt, infile): if opt.grid: grid_images[seed] = image else: - if operation == 'postprocess': - filename = choose_postprocess_name(opt.prompt) - elif upscaled and opt.save_original: - filename = f'{prefix}.{seed}.postprocessed.png' - else: - filename = f'{prefix}.{seed}.png' - if opt.variation_amount > 0: - first_seed = first_seed or seed - this_variation = [[seed, opt.variation_amount]] - opt.with_variations = prior_variations + this_variation - formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed) - elif len(prior_variations) > 0: - formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed) - elif operation == 'postprocess': - formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed) - else: - formatted_dream_prompt = opt.dream_prompt_str(seed=seed) + postprocessed = upscaled if upscaled else operation=='postprocess' + filename, formatted_dream_prompt = prepare_image_metadata( + opt, + prefix, + seed, + operation, + prior_variations, + postprocessed, + first_seed + ) + path = file_writer.save_image_and_prompt_to_png( image = image, dream_prompt = formatted_dream_prompt, @@ -310,10 +297,15 @@ def main_loop(gen, opt, infile): if (not upscaled) or opt.save_original: # only append to results if we didn't overwrite an earlier output results.append([path, formatted_dream_prompt]) + # so that the seed autocompletes (on linux|mac when -S or --seed specified + if completer: + completer.add_seed(seed) + completer.add_seed(first_seed) last_results.append([path, seed]) if operation == 'generate': catch_ctrl_c = infile is None # if running interactively, we catch keyboard interrupts + opt.last_operation='generate' gen.prompt2image( image_callback=image_writer, catch_interrupts=catch_ctrl_c, @@ -321,7 +313,7 @@ def main_loop(gen, opt, infile): ) elif operation == 'postprocess': print(f'>> fixing {opt.prompt}') - do_postprocess(gen,opt,image_writer) + opt.last_operation = do_postprocess(gen,opt,image_writer) if opt.grid and len(grid_images) > 0: grid_img = make_grid(list(grid_images.values())) @@ -356,6 +348,7 @@ def main_loop(gen, opt, infile): global output_cntr output_cntr = write_log(results, log_path ,('txt', 'md'), output_cntr) print() + completer.add_to_history(command) print('goodbye!') @@ -377,7 +370,8 @@ def do_postprocess (gen, opt, callback): elif opt.out_direction: tool = 
'outpaint' opt.save_original = True # do not overwrite old image! - return gen.apply_postprocessor( + opt.last_operation = f'postprocess:{tool}' + gen.apply_postprocessor( image_path = opt.prompt, tool = tool, gfpgan_strength = opt.gfpgan_strength, @@ -388,18 +382,54 @@ def do_postprocess (gen, opt, callback): callback = callback, opt = opt, ) + return opt.last_operation -def choose_postprocess_name(original_filename): - basename,_ = os.path.splitext(os.path.basename(original_filename)) - if re.search('\d+\.\d+$',basename): - return f'{basename}.fixed.png' - match = re.search('(\d+\.\d+)\.fixed(-(\d+))?$',basename) - if match: - counter = match.group(3) or 0 - return '{prefix}-{counter:02d}.png'.format(prefix=match.group(1), counter=int(counter)+1) +def prepare_image_metadata( + opt, + prefix, + seed, + operation='generate', + prior_variations=[], + postprocessed=False, + first_seed=None +): + + if postprocessed and opt.save_original: + filename = choose_postprocess_name(opt,prefix,seed) else: - return f'{basename}.fixed.png' + filename = f'{prefix}.{seed}.png' + if opt.variation_amount > 0: + first_seed = first_seed or seed + this_variation = [[seed, opt.variation_amount]] + opt.with_variations = prior_variations + this_variation + formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed) + elif len(prior_variations) > 0: + formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed) + elif operation == 'postprocess': + formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed) + else: + formatted_dream_prompt = opt.dream_prompt_str(seed=seed) + return filename,formatted_dream_prompt + +def choose_postprocess_name(opt,prefix,seed) -> str: + match = re.search('postprocess:(\w+)',opt.last_operation) + if match: + modifier = match.group(1) # will look like "gfpgan", "upscale", "outpaint" or "embiggen" + else: + modifier = 'postprocessed' + + counter = 0 + filename = None + available = False + while not available: + if counter > 0: + filename = f'{prefix}.{seed}.{modifier}.png' + else: + filename = f'{prefix}.{seed}.{modifier}-{counter:02d}.png' + available = not os.path.exists(os.path.join(opt.outdir,filename)) + counter += 1 + return filename def get_next_command(infile=None) -> str: # command string if infile is None: @@ -444,6 +474,46 @@ def dream_server_loop(gen, host, port, outdir, gfpgan): dream_server.server_close() +def split_variations(variations_string) -> list: + # shotgun parsing, woo + parts = [] + broken = False # python doesn't have labeled loops... 
+ for part in variations_string.split(','): + seed_and_weight = part.split(':') + if len(seed_and_weight) != 2: + print(f'** Could not parse with_variation part "{part}"') + broken = True + break + try: + seed = int(seed_and_weight[0]) + weight = float(seed_and_weight[1]) + except ValueError: + print(f'** Could not parse with_variation part "{part}"') + broken = True + break + parts.append([seed, weight]) + if broken: + return None + elif len(parts) == 0: + return None + else: + return parts + +def retrieve_dream_command(opt,file_path): + ''' + Given a full or partial path to a previously-generated image file, + will retrieve and format the dream command used to generate the image, + and pop it into the readline buffer (linux, Mac), or print out a comment + for cut-and-paste (windows) + ''' + dir,basename = os.path.split(file_path) + if len(dir) == 0: + path = os.path.join(opt.outdir,basename) + else: + path = file_path + cmd = dream_cmd_from_png(path) + completer.set_line(cmd) + def write_log_message(results, log_path): """logs the name of the output image, prompt, and prompt args to the terminal and log file""" global output_cntr From dff4850a8259de02705452dcc2d3dc30bb643888 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 28 Sep 2022 11:48:11 -0400 Subject: [PATCH 11/14] add documentation and bug fixes - normalized how filenames are written out when postprocessing invoked - various fixes of bugs encountered during testing - updated documentation - updated help text --- docs/features/CLI.md | 79 +++++++++++++++++++++++++++++++++++++++++++ ldm/dream/args.py | 32 ++++++++++++++++-- ldm/dream/log.py | 13 ++++--- ldm/dream/readline.py | 20 ++++++++--- ldm/generate.py | 32 ++++++++++-------- scripts/dream.py | 37 +++++++++----------- 6 files changed, 166 insertions(+), 47 deletions(-) diff --git a/docs/features/CLI.md b/docs/features/CLI.md index 4a1580fc0d..530d659c64 100644 --- a/docs/features/CLI.md +++ b/docs/features/CLI.md @@ -205,6 +205,85 @@ well as the --mask (-M) argument: | --init_mask | -M | None |Path to an image the same size as the initial_image, with areas for inpainting made transparent.| +# Convenience commands + +In addition to the standard image generation arguments, there are a +series of convenience commands that begin with !: + +## !fix + +This command runs a post-processor on a previously-generated image. It +takes a PNG filename or path and applies your choice of the -U, -G, or +--embiggen switches in order to fix faces or upscale. If you provide a +filename, the script will look for it in the current output +directory. Otherwise you can provide a full or partial path to the +desired file. + +Some examples: + +Upscale to 4X its original size and fix faces using codeformer: +~~~ +dream> !fix 0000045.4829112.png -G1 -U4 -ft codeformer +~~~ + +Use the GFPGAN algorithm to fix faces, then upscale to 3X using --embiggen: + +~~~ +dream> !fix 0000045.4829112.png -G0.8 -ft gfpgan +>> fixing outputs/img-samples/0000045.4829112.png +>> retrieved seed 4829112 and prompt "boy enjoying a banana split" +>> GFPGAN - Restoring Faces for image seed:4829112 +Outputs: +[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8 + +dream> !fix 000017.4829112.gfpgan-00.png --embiggen 3 +...lots of text... 
+Outputs: +[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 +~~~ + +## !fetch + +This command retrieves the generation parameters from a previously +generated image and either loads them into the command line +(Linux|Mac), or prints them out in a comment for copy-and-paste +(Windows). You may provide either the name of a file in the current +output directory, or a full file path. + +~~~ +dream> !fetch 0000015.8929913.png +# the script returns the next line, ready for editing and running: +dream> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5 +~~~ + +Note that this command may behave unexpectedly if given a PNG file that +was not generated by InvokeAI. + +## !history + +The dream script keeps track of all the commands you issue during a +session, allowing you to re-run them. On Mac and Linux systems, it +also writes the command-line history out to disk, giving you access to +the most recent 1000 commands issued. + +The `!history` command will return a numbered list of all the commands +issued during the session (Windows), or the most recent 1000 commands +(Mac|Linux). You can then repeat a command by using the command !NNN, +where "NNN" is the history line number. For example: + +~~~ +dream> !history +... +[14] happy woman sitting under tree wearing broad hat and flowing garment +[15] beautiful woman sitting under tree wearing broad hat and flowing garment +[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 +[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194 +[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194 +... +dream> !20 +dream> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194 +~~~ + # Command-line editing and completion If you are on a Macintosh or Linux machine, the command-line offers diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 239921de2b..92d5acee0c 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -446,8 +446,36 @@ class Args(object): def _create_dream_cmd_parser(self): parser = argparse.ArgumentParser( description=""" - Generate example: dream> a fantastic alien landscape -W576 -H512 -s60 -n4 - Postprocess example: dream> !pp 0000045.4829112.png -G1 -U4 -ft codeformer + *Image generation:* + To generate images, type a text prompt with optional switches. Example: + a fantastic alien landscape -W576 -H512 -s60 -n4 + + *postprocessing* + To post-process a previously-generated image, use the "!fix" command, and + provide the image filename and postprocessing options. You may provide either the filename, + in which case the script will look in the current output directory, or an arbitrary absolute or + relative path to the desired PNG file. + -G (strength) - apply face-fixing, e.g. -G0.8 + -U (scaleg) - upscale to the desired dimensions with ersgan, e.g. -U2 + --embiggen (scale) - upscale using the embiggen algorithm + -ft (algorithm) - select which face-fixing algorithm to use (gfpgan|codeformer) + + Example: !fix 0000045.4829112.png -G1 -U4 -ft codeformer + + *History manipulation* + Use !fetch to retrieve the image generation parameters used to generate a previously-generated + image. 
The original command will be inserted onto the command line for editing (Linux, Mac), or + printed as a comment above the dream> prompt (Windows). If a bare filename is provided, the script + will look in the current output directory + + Example: dream> !fetch 0000015.8929913.png + dream> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5 + + Use !history to get a numbered list of the past 1000 commands (Linux, Mac) or the commands issued + during the current session (Windows). + + Use !NN to retrieve the NNth command from the history list and load it into the command line + for editing and re-issuing. """ ) render_group = parser.add_argument_group('General rendering') diff --git a/ldm/dream/log.py b/ldm/dream/log.py index 783a62d4f9..beca10fa3f 100644 --- a/ldm/dream/log.py +++ b/ldm/dream/log.py @@ -23,11 +23,14 @@ def write_log(results, log_path, file_types, output_cntr): def write_log_message(results, output_cntr): """logs to the terminal""" log_lines = [f"{path}: {prompt}\n" for path, prompt in results] - for l in log_lines: - output_cntr += 1 - print(f"[{output_cntr}] {l}", end="") - return output_cntr - + if len(log_lines)>1: + subcntr = 1 + for l in log_lines: + print(f"[{output_cntr}.{subcntr}] {l}", end="") + subcntr += 1 + else: + print(f"[{output_cntr}] {log_lines[0]}", end="") + return output_cntr+1 def write_log_files(results, log_path, file_types): for file_type in file_types: diff --git a/ldm/dream/readline.py b/ldm/dream/readline.py index 6101b1cebd..4bee51233f 100644 --- a/ldm/dream/readline.py +++ b/ldm/dream/readline.py @@ -21,6 +21,10 @@ try: except: readline_available = False +#to simulate what happens on windows systems, uncomment +# this line +#readline_available = False + IMG_EXTENSIONS = ('.png','.jpg','.jpeg') COMMANDS = ( '--steps','-s', @@ -101,12 +105,14 @@ class Completer: response = None return response - def add_to_history(self,line): + def add_history(self,line): ''' - This is a no-op; readline handles this automatically. But we provide it - for DummyReadline compatibility. 
+ Pass thru to readline ''' - pass + readline.add_history(line) + + def remove_history_item(self,pos): + readline.remove_history_item(pos) def add_seed(self, seed): ''' @@ -226,7 +232,7 @@ class DummyCompleter(Completer): super().__init__(options) self.history = list() - def add_to_history(self,line): + def add_history(self,line): self.history.append(line) def get_current_history_length(self): @@ -235,6 +241,9 @@ class DummyCompleter(Completer): def get_history_item(self,index): return self.history[index-1] + def remove_history_item(self,index): + return self.history.pop(index-1) + def set_line(self,line): print(f'# {line}') @@ -244,6 +253,7 @@ if readline_available: readline.set_completer( completer.complete ) + readline.set_auto_history(False) readline.set_pre_input_hook(completer._pre_input_hook) readline.set_completer_delims(' ') readline.parse_and_bind('tab: complete') diff --git a/ldm/generate.py b/ldm/generate.py index 8456014ec2..ffaaf8193e 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -490,25 +490,26 @@ class Generate: opt = None, ): # retrieve the seed from the image; - # note that we will try both the new way and the old way, since not all files have the - # metadata (yet) seed = None image_metadata = None prompt = None - try: - args = metadata_from_png(image_path) - seed = args.seed - prompt = args.prompt - print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}') - except: - m = re.search('(\d+)\.png$',image_path) - if m: - seed = m.group(1) + + args = metadata_from_png(image_path) + seed = args.seed + prompt = args.prompt + print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}') if not seed: print('* Could not recover seed for image. Replacing with 42. This will not affect image quality') seed = 42 - + + # try to reuse the same filename prefix as the original file. + # note that this is hacky + prefix = None + m = re.search('(\d+)\.',os.path.basename(image_path)) + if m: + prefix = m.groups()[0] + # face fixers and esrgan take an Image, but embiggen takes a path image = Image.open(image_path) @@ -530,6 +531,7 @@ class Generate: save_original = save_original, upscale = upscale, image_callback = callback, + prefix = prefix, ) elif tool == 'embiggen': @@ -716,7 +718,9 @@ class Generate: strength = 0.0, codeformer_fidelity = 0.75, save_original = False, - image_callback = None): + image_callback = None, + prefix = None, + ): for r in image_list: image, seed = r @@ -750,7 +754,7 @@ class Generate: ) if image_callback is not None: - image_callback(image, seed, upscaled=True) + image_callback(image, seed, upscaled=True, use_prefix=prefix) else: r[0] = image diff --git a/scripts/dream.py b/scripts/dream.py index 246418e60b..1f3cc669bc 100755 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -17,10 +17,9 @@ from ldm.dream.image_util import make_grid from ldm.dream.log import write_log from omegaconf import OmegaConf -# Placeholder to be replaced with proper class that tracks the -# outputs and associates with the prompt that generated them. -# Just want to get the formatting look right for now. 
-output_cntr = 0 +# The output counter labels each output and is keyed to the +# command-line history +output_cntr = completer.get_current_history_length()+1 def main(): """Initialize command-line parsers and the diffusion model""" @@ -259,17 +258,21 @@ def main_loop(gen, opt, infile): last_results = [] try: file_writer = PngWriter(current_outdir) - prefix = file_writer.unique_prefix() results = [] # list of filename, prompt pairs grid_images = dict() # seed -> Image, only used if `opt.grid` prior_variations = opt.with_variations or [] - def image_writer(image, seed, upscaled=False, first_seed=None): + def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None): # note the seed is the seed of the current image # the first_seed is the original seed that noise is added to # when the -v switch is used to generate variations - path = None nonlocal prior_variations + if use_prefix is not None: + prefix = use_prefix + else: + prefix = file_writer.unique_prefix() + + path = None if opt.grid: grid_images[seed] = image else: @@ -348,7 +351,10 @@ def main_loop(gen, opt, infile): global output_cntr output_cntr = write_log(results, log_path ,('txt', 'md'), output_cntr) print() - completer.add_to_history(command) + if operation == 'postprocess': + completer.add_history(f'!fix {command}') + else: + completer.add_history(command) print('goodbye!') @@ -372,7 +378,7 @@ def do_postprocess (gen, opt, callback): opt.save_original = True # do not overwrite old image! opt.last_operation = f'postprocess:{tool}' gen.apply_postprocessor( - image_path = opt.prompt, + image_path = file_path, tool = tool, gfpgan_strength = opt.gfpgan_strength, codeformer_fidelity = opt.codeformer_fidelity, @@ -423,7 +429,7 @@ def choose_postprocess_name(opt,prefix,seed) -> str: filename = None available = False while not available: - if counter > 0: + if counter == 0: filename = f'{prefix}.{seed}.{modifier}.png' else: filename = f'{prefix}.{seed}.{modifier}-{counter:02d}.png' @@ -514,16 +520,5 @@ def retrieve_dream_command(opt,file_path): cmd = dream_cmd_from_png(path) completer.set_line(cmd) -def write_log_message(results, log_path): - """logs the name of the output image, prompt, and prompt args to the terminal and log file""" - global output_cntr - log_lines = [f'{path}: {prompt}\n' for path, prompt in results] - for l in log_lines: - output_cntr += 1 - print(f'[{output_cntr}] {l}',end='') - - with open(log_path, 'a', encoding='utf-8') as file: - file.writelines(log_lines) - if __name__ == '__main__': main() From e49e83e944de5e1aecff5cd99df682fea4b523d6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 28 Sep 2022 11:57:59 -0400 Subject: [PATCH 12/14] fix crash that occurs in write_log when user interrupts generation with ^C --- ldm/dream/args.py | 6 ++++-- ldm/dream/log.py | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 92d5acee0c..1204773db7 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -81,7 +81,7 @@ with metadata_from_png(): """ import argparse -from argparse import Namespace +from argparse import Namespace, RawTextHelpFormatter import shlex import json import hashlib @@ -445,7 +445,9 @@ class Args(object): # This creates the parser that processes commands on the dream> command line def _create_dream_cmd_parser(self): parser = argparse.ArgumentParser( - description=""" + formatter_class=RawTextHelpFormatter, + description= + """ *Image generation:* To generate images, type a text prompt with optional switches. 
Example: a fantastic alien landscape -W576 -H512 -s60 -n4 diff --git a/ldm/dream/log.py b/ldm/dream/log.py index beca10fa3f..8aebe62671 100644 --- a/ldm/dream/log.py +++ b/ldm/dream/log.py @@ -22,6 +22,8 @@ def write_log(results, log_path, file_types, output_cntr): def write_log_message(results, output_cntr): """logs to the terminal""" + if len(results) == 0: + return output_cntr log_lines = [f"{path}: {prompt}\n" for path, prompt in results] if len(log_lines)>1: subcntr = 1 From f93963cd6bcc4abd94d05d2a6352e67a99bfdc2c Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 28 Sep 2022 13:00:54 -0400 Subject: [PATCH 13/14] make CLI help text shorter to improve readability --- ldm/dream/args.py | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 1204773db7..71ac3717da 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -449,35 +449,20 @@ class Args(object): description= """ *Image generation:* - To generate images, type a text prompt with optional switches. Example: - a fantastic alien landscape -W576 -H512 -s60 -n4 + dream> a fantastic alien landscape -W576 -H512 -s60 -n4 *postprocessing* - To post-process a previously-generated image, use the "!fix" command, and - provide the image filename and postprocessing options. You may provide either the filename, - in which case the script will look in the current output directory, or an arbitrary absolute or - relative path to the desired PNG file. - -G (strength) - apply face-fixing, e.g. -G0.8 - -U (scaleg) - upscale to the desired dimensions with ersgan, e.g. -U2 - --embiggen (scale) - upscale using the embiggen algorithm - -ft (algorithm) - select which face-fixing algorithm to use (gfpgan|codeformer) - - Example: !fix 0000045.4829112.png -G1 -U4 -ft codeformer + !fix applies upscaling/facefixing to a previously-generated image. + dream> !fix 0000045.4829112.png -G1 -U4 -ft codeformer *History manipulation* - Use !fetch to retrieve the image generation parameters used to generate a previously-generated - image. The original command will be inserted onto the command line for editing (Linux, Mac), or - printed as a comment above the dream> prompt (Windows). If a bare filename is provided, the script - will look in the current output directory + !fetch retrieves the command used to generate an earlier image. + dream> !fetch 0000015.8929913.png + dream> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5 - Example: dream> !fetch 0000015.8929913.png - dream> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5 + !history lists all the commands issued during the current session. - Use !history to get a numbered list of the past 1000 commands (Linux, Mac) or the commands issued - during the current session (Windows). - - Use !NN to retrieve the NNth command from the history list and load it into the command line - for editing and re-issuing. + !NN retrieves the NNth command from the history """ ) render_group = parser.add_argument_group('General rendering') @@ -645,7 +630,7 @@ class Args(object): '-embiggen', nargs='+', type=float, - help='Embiggen tiled img2img for higher resolution and detail without extra VRAM usage. Takes scale factor relative to the size of the --init_img (-I), followed by ESRGAN upscaling strength (0-1.0), followed by minimum amount of overlap between tiles as a decimal ratio (0 - 1.0) or number of pixels. ESRGAN strength defaults to 0.75, and overlap defaults to 0.25 . 
ESRGAN is used to upscale the init prior to cutting it into tiles/pieces to run through img2img and then stitch back togeather.', + help='Arbitrary upscaling using img2img. Provide scale factor (0.75), optionally followed by strength (0.75) and tile overlap proportion (0.25).', default=None, ) postprocessing_group.add_argument( @@ -653,7 +638,7 @@ class Args(object): '-embiggen_tiles', nargs='+', type=int, - help='If while doing Embiggen we are altering only parts of the image, takes a list of tiles by number to process and replace onto the image e.g. `1 3 5`, useful for redoing problematic spots from a prior Embiggen run', + help='For embiggen, provide list of tiles to process and replace onto the image e.g. `1 3 5`.', default=None, ) special_effects_group.add_argument( From 97ec1b156ced74c3dfc411cf9a74aa40b765828d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 28 Sep 2022 14:31:34 -0400 Subject: [PATCH 14/14] fix module-not-found error on startup of new Invoke WebGUI --- scripts/dream.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/dream.py b/scripts/dream.py index c5c52ef07c..9aa1c7751a 100644 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -15,6 +15,7 @@ from ldm.dream.pngwriter import PngWriter from ldm.dream.image_util import make_grid from ldm.dream.log import write_log from omegaconf import OmegaConf +from backend.invoke_ai_web_server import InvokeAIWebServer # The output counter labels each output and is keyed to the # command-line history
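
Illustrative aside (not part of the patches above): the command-history behavior this series adds can be exercised directly through the `completer` singleton from `ldm/dream/readline.py`. The sketch below is a minimal, hypothetical usage example based on the interface shown in the hunks above; the `replay` helper and the sample prompts are invented for illustration, and on Windows the `DummyCompleter` fallback stands in for GNU readline.

```python
# Minimal sketch of the !history / !NN replay flow added in this series.
# Assumes the Completer/DummyCompleter interface from ldm/dream/readline.py;
# `replay` and the sample prompts are illustrative only.
from ldm.dream.readline import completer

def replay(line_number: int) -> None:
    # main_loop() records each issued command once a generation completes
    completer.add_history('a fantastic alien landscape -W576 -H512 -s60')
    completer.add_history('!fix 0000045.4829112.png -G1 -U4 -ft codeformer')

    # !history pages through the numbered command list
    completer.show_history()

    # !NN fetches entry NN and loads it into the line buffer
    # (or prints it as a comment when DummyCompleter is active)
    line = completer.get_line(line_number)
    if line is not None:
        completer.set_line(line)

replay(2)
```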