From 5418bd3b242a76e571439240d5f664744119ae5e Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Thu, 9 Mar 2023 09:22:29 -0500
Subject: [PATCH 01/11] (ci) unlabel stale issues when commented

---
 .github/workflows/close-inactive-issues.yml | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index dbb89cc8f2..89c98c1c3f 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -2,6 +2,8 @@ name: Close inactive issues
 on:
   schedule:
     - cron: "00 6 * * *"
+  issue_comment:
+    types: [ "created" ]
 
 env:
   DAYS_BEFORE_ISSUE_STALE: 14
@@ -10,6 +12,7 @@ env:
 jobs:
   close-issues:
     runs-on: ubuntu-latest
+    if: ${{ !github.event.issue.pull_request }}
     permissions:
       issues: write
       pull-requests: write
@@ -18,9 +21,9 @@ jobs:
         with:
           days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
           days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
-          stale-issue-label: "Inactive Issue"
-          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
-          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
+          stale-issue-label: "stale"
+          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. Please reply with a comment to keep the issue open. We recommend testing with the latest release to make sure it hasn't been already fixed."
+          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new one and reference issue ${{ github.event.issue.number }}."
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}
From 507e12520ee1ebd65a23e969c839208456423691 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 9 Mar 2023 19:21:57 +0100
Subject: [PATCH 02/11] Make sure command also works with Oh-my-zsh

Many people use oh-my-zsh for their command line: https://ohmyz.sh/

Adding `""` should work both on ohmyzsh and native bash
---
 docs/installation/020_INSTALL_MANUAL.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md
index 711df0f8f9..401560e76c 100644
--- a/docs/installation/020_INSTALL_MANUAL.md
+++ b/docs/installation/020_INSTALL_MANUAL.md
@@ -148,7 +148,7 @@ manager, please follow these steps:
 === "CUDA (NVidia)"
 
     ```bash
-    pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+    pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
     ```
 
 === "ROCm (AMD)"

From 55d36eaf4fcb3a963ddd055d74fff90ba603834d Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:31:05 -0800
Subject: [PATCH 03/11] fix: image_resized_to_grid_as_tensor: reconnect dropped multiple_of argument

---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 6bd1fe339d..cb842356e4 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -175,7 +175,7 @@ def image_resized_to_grid_as_tensor(
     :param normalize: scale the range to [-1, 1] instead of [0, 1]
     :param multiple_of: resize the input so both dimensions are a multiple of this
     """
-    w, h = trim_to_multiple_of(*image.size)
+    w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
     transformation = T.Compose(
         [
             T.Resize((h, w), T.InterpolationMode.LANCZOS),
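A note on PATCH 03: with the `multiple_of` keyword dropped from the call, `trim_to_multiple_of` silently fell back to its default granularity, so callers asking for a coarser grid got dimensions trimmed to the wrong multiple. The sketch below illustrates the failure mode; the helper body is an assumption inferred from its name and call site, not the actual InvokeAI source.

```python
# Assumed behavior of trim_to_multiple_of: floor each dimension to the nearest
# multiple of `multiple_of`. The real helper lives in
# invokeai/backend/stable_diffusion/diffusers_pipeline.py and may differ.
def trim_to_multiple_of(*args, multiple_of=8):
    return tuple(x - x % multiple_of for x in args)

# Before the fix, the keyword never reached the helper, so every caller got
# the default granularity:
print(trim_to_multiple_of(600, 515))                  # (600, 512)
# After the fix, a caller asking for a multiple of 64 actually gets one:
print(trim_to_multiple_of(600, 515, multiple_of=64))  # (576, 512)
```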
From 081397737b2fc4cc1444e9c142a44c9e094867c2 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:33:06 -0800
Subject: [PATCH 04/11] typo: docstring spelling fixes

looks like they've already been corrected in the upstream copy
---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index cb842356e4..8e73cc0bb2 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -290,10 +290,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
         scheduler ([`SchedulerMixin`]):
-            A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
+            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
         safety_checker ([`StableDiffusionSafetyChecker`]):
-            Classification module that estimates whether generated images could be considered offsensive or harmful.
+            Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
         feature_extractor ([`CLIPFeatureExtractor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
@@ -436,7 +436,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         Ready this pipeline's models.
 
-        i.e. pre-load them to the GPU if appropriate.
+        i.e. preload them to the GPU if appropriate.
         """
         self._model_group.ready()
 

From 42355b70c2ef2d16dc2e2f55bbb3fb877f1f1961 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:34:41 -0800
Subject: [PATCH 05/11] chore: add new argument to overridden method to match new signature upstream

---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 8e73cc0bb2..3857cf2def 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -440,7 +440,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         self._model_group.ready()
 
-    def to(self, torch_device: Optional[Union[str, torch.device]] = None):
+    def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
         # overridden method; types match the superclass.
         if torch_device is None:
             return self
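A note on PATCH 05: when a superclass gains a keyword argument, an override that keeps the old, narrower signature raises `TypeError` as soon as any caller passes the new keyword. The toy classes below demonstrate why matching the upstream signature matters; they are illustrative stand-ins, not the diffusers API.

```python
class Base:
    def to(self, torch_device=None, silence_dtype_warnings=False):
        return self


class StaleOverride(Base):
    def to(self, torch_device=None):  # missing the newer keyword
        return self


class MatchingOverride(Base):
    def to(self, torch_device=None, silence_dtype_warnings=False):
        return self


MatchingOverride().to("cpu", silence_dtype_warnings=True)  # works
try:
    StaleOverride().to("cpu", silence_dtype_warnings=True)
except TypeError as err:
    # to() got an unexpected keyword argument 'silence_dtype_warnings'
    print(err)
```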
From faa2558e2f5c56d402aeedd18f77f3b3b15aad37 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:35:54 -0800
Subject: [PATCH 06/11] fix(Pipeline.debug_latents): fix import for moved utility function

---
 .../backend/stable_diffusion/diffusers_pipeline.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 3857cf2def..c97b122728 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -942,11 +942,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         return super().decode_latents(latents)
 
     def debug_latents(self, latents, msg):
+        from invokeai.backend.image_util import debug_image
         with torch.inference_mode():
-            from ldm.util import debug_image
-
             decoded = self.numpy_to_pil(self.decode_latents(latents))
-        for i, img in enumerate(decoded):
-            debug_image(
-                img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
-            )
+            for i, img in enumerate(decoded):
+                debug_image(
+                    img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
+                )

From ad7b1fa6fb41d0c1813660be96ec6876b8cc9ce8 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:59:55 -0800
Subject: [PATCH 07/11] model_manager: model to/from CPU methods are implemented on the Pipeline

---
 .../backend/model_management/model_manager.py | 40 +------
 1 file changed, 2 insertions(+), 38 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index b362500ff7..7639e79362 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -104,7 +104,7 @@ class ModelManager(object):
         if model_name in self.models:
             requested_model = self.models[model_name]["model"]
             print(f">> Retrieving model {model_name} from system RAM cache")
-            self.models[model_name]["model"] = self._model_from_cpu(requested_model)
+            requested_model.ready()
             width = self.models[model_name]["width"]
             height = self.models[model_name]["height"]
             hash = self.models[model_name]["hash"]
@@ -499,6 +499,7 @@ class ModelManager(object):
         print(f">> Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
+        model.offload_all()
         self.models[model_name]["model"] = self._model_to_cpu(model)
 
         gc.collect()
         if self._has_cuda():
@@ -1044,43 +1045,6 @@ class ModelManager(object):
         self.stack.remove(model_name)
         self.models.pop(model_name, None)
 
-    def _model_to_cpu(self, model):
-        if self.device == CPU_DEVICE:
-            return model
-
-        if isinstance(model, StableDiffusionGeneratorPipeline):
-            model.offload_all()
-            return model
-
-        model.cond_stage_model.device = CPU_DEVICE
-        model.to(CPU_DEVICE)
-
-        for submodel in ("first_stage_model", "cond_stage_model", "model"):
-            try:
-                getattr(model, submodel).to(CPU_DEVICE)
-            except AttributeError:
-                pass
-        return model
-
-    def _model_from_cpu(self, model):
-        if self.device == CPU_DEVICE:
-            return model
-
-        if isinstance(model, StableDiffusionGeneratorPipeline):
-            model.ready()
-            return model
-
-        model.to(self.device)
-        model.cond_stage_model.device = self.device
-
-        for submodel in ("first_stage_model", "cond_stage_model", "model"):
-            try:
-                getattr(model, submodel).to(self.device)
-            except AttributeError:
-                pass
-
-        return model
-
     def _pop_oldest_model(self):
         """
         Remove the first element of the FIFO, which ought
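A note on PATCH 07: the manager no longer converts models between devices itself; it delegates to the pipeline via `ready()` and `offload_all()`, the two pipeline methods the diff calls. The cache below is a simplified, hypothetical stand-in for `ModelManager`, sketching the resulting call pattern; it is not the real interface.

```python
class DummyPipeline:
    """Stand-in for StableDiffusionGeneratorPipeline's device-placement API."""

    def ready(self):
        print("moving submodels to GPU")

    def offload_all(self):
        print("moving submodels to CPU")


class ToyModelCache:
    """Illustrative cache bookkeeping only; not ModelManager's real interface."""

    def __init__(self):
        self.models = {}  # model_name -> {"model": pipeline, ...}

    def get_model(self, name):
        requested = self.models[name]["model"]
        requested.ready()  # the pipeline moves its own weights back to the GPU
        return requested

    def offload_model(self, name):
        model = self.models[name]["model"]
        model.offload_all()  # in-place; no reassignment into the cache needed


cache = ToyModelCache()
cache.models["sd-1.5"] = {"model": DummyPipeline()}
cache.get_model("sd-1.5")
cache.offload_model("sd-1.5")
```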
From 9d339e94f25178b8bf317679cfc9a790480cea74 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 18:01:37 -0800
Subject: [PATCH 08/11] backend..conditioning: remove code for legacy model

---
 invokeai/backend/prompting/__init__.py       |  1 -
 invokeai/backend/prompting/conditioning.py   | 38 ++------------------
 invokeai/backend/web/invoke_ai_web_server.py |  3 +-
 3 files changed, 4 insertions(+), 38 deletions(-)

diff --git a/invokeai/backend/prompting/__init__.py b/invokeai/backend/prompting/__init__.py
index 152edf646b..b52206dd94 100644
--- a/invokeai/backend/prompting/__init__.py
+++ b/invokeai/backend/prompting/__init__.py
@@ -3,7 +3,6 @@ Initialization file for invokeai.backend.prompting
 """
 from .conditioning import (
     get_prompt_structure,
-    get_tokenizer,
     get_tokens_for_prompt_object,
     get_uc_and_c_and_ec,
     split_weighted_subprompts,
diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index 5f94b0f975..1ddae1e93d 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -7,7 +7,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
 
 """
 import re
-from typing import Any, Optional, Union
+from typing import Optional, Union
 
 from compel import Compel
 from compel.prompt_parser import (
@@ -17,7 +17,6 @@ from compel.prompt_parser import (
     Fragment,
     PromptParser,
 )
-from transformers import CLIPTokenizer
 
 from invokeai.backend.globals import Globals
 
@@ -25,36 +24,6 @@
 from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype
 
-def get_tokenizer(model) -> CLIPTokenizer:
-    # TODO remove legacy ckpt fallback handling
-    return (
-        getattr(model, "tokenizer", None)  # diffusers
-        or model.cond_stage_model.tokenizer
-    )  # ldm
-
-
-def get_text_encoder(model) -> Any:
-    # TODO remove legacy ckpt fallback handling
-    return getattr(
-        model, "text_encoder", None
-    ) or UnsqueezingLDMTransformer(  # diffusers
-        model.cond_stage_model.transformer
-    )  # ldm
-
-
-class UnsqueezingLDMTransformer:
-    def __init__(self, ldm_transformer):
-        self.ldm_transformer = ldm_transformer
-
-    @property
-    def device(self):
-        return self.ldm_transformer.device
-
-    def __call__(self, *args, **kwargs):
-        insufficiently_unsqueezed_tensor = self.ldm_transformer(*args, **kwargs)
-        return insufficiently_unsqueezed_tensor.unsqueeze(0)
-
-
 def get_uc_and_c_and_ec(
     prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
 ):
@@ -64,11 +33,10 @@ def get_uc_and_c_and_ec(
         prompt_string
     )
 
-    tokenizer = get_tokenizer(model)
-    text_encoder = get_text_encoder(model)
+    tokenizer = model.tokenizer
     compel = Compel(
         tokenizer=tokenizer,
-        text_encoder=text_encoder,
+        text_encoder=model.text_encoder,
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype,
         truncate_long_prompts=False
diff --git a/invokeai/backend/web/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py
index a192073b73..dc77ff4723 100644
--- a/invokeai/backend/web/invoke_ai_web_server.py
+++ b/invokeai/backend/web/invoke_ai_web_server.py
@@ -29,7 +29,6 @@ from ..image_util import PngWriter, retrieve_metadata
 from ...frontend.merge.merge_diffusers import merge_diffusion_models
 from ..prompting import (
     get_prompt_structure,
-    get_tokenizer,
     get_tokens_for_prompt_object,
 )
 from ..stable_diffusion import PipelineIntermediateState
@@ -1274,7 +1273,7 @@ class InvokeAIWebServer:
             None
             if type(parsed_prompt) is Blend
             else get_tokens_for_prompt_object(
-                get_tokenizer(self.generate.model), parsed_prompt
+                self.generate.model.tokenizer, parsed_prompt
             )
         )
         attention_maps_image_base64_url = (
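A note on PATCH 08: the deleted `getattr` shims existed only to tolerate the legacy ldm checkpoint wrapper, which kept its tokenizer under `cond_stage_model`. Once every loaded model is a diffusers pipeline exposing `tokenizer` and `text_encoder` as plain attributes, the indirection collapses to a direct access. A before/after sketch with a stand-in object, not a real pipeline:

```python
from types import SimpleNamespace

# Before: tolerate both diffusers pipelines and the legacy ldm wrapper.
def get_tokenizer_legacy(model):
    return (
        getattr(model, "tokenizer", None)      # diffusers
        or model.cond_stage_model.tokenizer    # ldm fallback, now dead code
    )

# After: diffusers-only, so a plain attribute access suffices.
def get_tokenizer_current(model):
    return model.tokenizer

diffusers_like = SimpleNamespace(tokenizer="clip-tokenizer")
assert get_tokenizer_legacy(diffusers_like) == get_tokenizer_current(diffusers_like)
```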
From 1a829bb998f3f28f35f8d3801d1447e9c3abea10 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 18:04:11 -0800
Subject: [PATCH 09/11] pipeline: remove code for legacy model

---
 invokeai/backend/generate.py                       | 12 ----------
 .../stable_diffusion/diffusers_pipeline.py         | 24 -------------------
 2 files changed, 36 deletions(-)

diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py
index ee5241bca1..35dba41ffb 100644
--- a/invokeai/backend/generate.py
+++ b/invokeai/backend/generate.py
@@ -495,18 +495,6 @@ class Generate:
             torch.cuda.reset_peak_memory_stats()
 
         results = list()
-        init_image = None
-        mask_image = None
-
-        try:
-            if (
-                self.free_gpu_mem
-                and self.model.cond_stage_model.device != self.model.device
-            ):
-                self.model.cond_stage_model.device = self.model.device
-                self.model.cond_stage_model.to(self.model.device)
-        except AttributeError:
-            pass
 
         try:
             uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index c97b122728..51e7b1ee1d 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -54,16 +54,6 @@ class PipelineIntermediateState:
     attention_map_saver: Optional[AttentionMapSaver] = None
 
 
-# copied from configs/stable-diffusion/v1-inference.yaml
-_default_personalization_config_params = dict(
-    placeholder_strings=["*"],
-    initializer_wods=["sculpture"],
-    per_image_tokens=False,
-    num_vectors_per_token=1,
-    progressive_words=False,
-)
-
-
 @dataclass
 class AddsMaskLatents:
     """Add the channels required for inpainting model input.
@@ -917,20 +907,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             device=self._model_group.device_for(self.unet),
         )
 
-    @property
-    def cond_stage_model(self):
-        return self.embeddings_provider
-
-    @torch.inference_mode()
-    def _tokenize(self, prompt: Union[str, List[str]]):
-        return self.tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=self.tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        )
-
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""

From 14c8738a71efc9ac26f89978c5f807861467b4bb Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 21:41:45 -0500
Subject: [PATCH 10/11] fix dangling reference to _model_to_cpu and missing variable model_description

---
 invokeai/backend/model_management/model_manager.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 7639e79362..4627f283f5 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -500,7 +500,6 @@ class ModelManager(object):
         print(f">> Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
         model.offload_all()
-        self.models[model_name]["model"] = self._model_to_cpu(model)
 
         gc.collect()
         if self._has_cuda():
@@ -558,7 +557,7 @@ class ModelManager(object):
         """
         model_name = model_name or Path(repo_or_path).stem
         model_description = (
-            model_description or f"Imported diffusers model {model_name}"
+            description or f"Imported diffusers model {model_name}"
         )
         new_config = dict(
             description=model_description,
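A note on PATCH 10: the `_model_to_cpu` call site had survived PATCH 07's deletion of the method itself, which would raise `AttributeError` on the next offload. The `model_description` fix addresses a self-referential assignment: binding a name makes it function-local, so reading it on the right-hand side of its own first assignment raises `UnboundLocalError`. A stripped-down illustration of the latter (hypothetical function, not the real importer signature):

```python
def import_model(repo_or_path, description=None):
    # Buggy version (commented out): assigning `model_description` makes it a
    # local, so reading it on the right-hand side raises UnboundLocalError.
    # model_description = model_description or f"Imported diffusers model {repo_or_path}"
    model_description = description or f"Imported diffusers model {repo_or_path}"
    return model_description


print(import_model("some-diffusers-model"))
```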
From bb3d1bb6cb2a7a9a69861bfb9266072411176d3b Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 22:24:43 -0500
Subject: [PATCH 11/11] Revert "Remove label from stale issues on comment event"

---
 .github/workflows/close-inactive-issues.yml | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index 89c98c1c3f..dbb89cc8f2 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -2,8 +2,6 @@ name: Close inactive issues
 on:
   schedule:
     - cron: "00 6 * * *"
-  issue_comment:
-    types: [ "created" ]
 
 env:
   DAYS_BEFORE_ISSUE_STALE: 14
@@ -12,7 +10,6 @@ env:
 jobs:
   close-issues:
     runs-on: ubuntu-latest
-    if: ${{ !github.event.issue.pull_request }}
     permissions:
       issues: write
       pull-requests: write
@@ -21,9 +18,9 @@ jobs:
         with:
           days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
           days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
-          stale-issue-label: "stale"
-          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. Please reply with a comment to keep the issue open. We recommend testing with the latest release to make sure it hasn't been already fixed."
-          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new one and reference issue ${{ github.event.issue.number }}."
+          stale-issue-label: "Inactive Issue"
+          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
+          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}