From 47e21d6e0492f27ebf4f6ec5cd7a584c873af591 Mon Sep 17 00:00:00 2001
From: Billy
Date: Tue, 17 Jun 2025 13:56:38 +1000
Subject: [PATCH] Formatting

---
 invokeai/backend/model_manager/config.py             | 9 ++++-----
 .../backend/model_manager/load/model_loaders/lora.py | 3 +--
 invokeai/backend/model_manager/omi.py                | 1 +
 invokeai/backend/model_manager/taxonomy.py           | 1 +
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index 656091ee2e..83f7ac61e9 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -31,7 +31,6 @@ from pathlib import Path
 from typing import ClassVar, Literal, Optional, TypeAlias, Union
 
 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter
-from pydantic.main import Model
 from typing_extensions import Annotated, Any, Dict
 
 from invokeai.app.util.misc import uuid_string
@@ -345,9 +344,9 @@ class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
         metadata = mod.metadata()
 
         return (
-            metadata.get("modelspec.sai_model_spec") and
-            metadata.get("ot_branch") == "omi_format" and
-            metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
+            metadata.get("modelspec.sai_model_spec")
+            and metadata.get("ot_branch") == "omi_format"
+            and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
         )
 
     @classmethod
@@ -370,7 +369,7 @@ class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
         else:
             raise InvalidModelConfigException(f"Unrecognised base architecture for OMI LoRA: {base_str}")
 
-        return { "base": base }
+        return {"base": base}
 
 
 class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):
diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py
index 94cfa34803..7a1d56b4dc 100644
--- a/invokeai/backend/model_manager/load/model_loaders/lora.py
+++ b/invokeai/backend/model_manager/load/model_loaders/lora.py
@@ -11,9 +11,9 @@ from safetensors.torch import load_file
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load.load_default import ModelLoader
-from invokeai.backend.model_manager.omi import convert_from_omi
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.omi import convert_from_omi
 from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
     BaseModelType,
@@ -77,7 +77,6 @@ class LoRALoader(ModelLoader):
 
         if config.format == ModelFormat.OMI:
             state_dict = convert_from_omi(state_dict)
-
         # Apply state_dict key conversions, if necessary.
         if self._model_base == BaseModelType.StableDiffusionXL:
             state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
diff --git a/invokeai/backend/model_manager/omi.py b/invokeai/backend/model_manager/omi.py
index 44abad4a99..8698937ace 100644
--- a/invokeai/backend/model_manager/omi.py
+++ b/invokeai/backend/model_manager/omi.py
@@ -1,4 +1,5 @@
 import torch
+
 from invokeai.backend.util.logging import InvokeAILogger
 
 logger = InvokeAILogger.get_logger()
diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py
index 14971d2e5b..91aaf007d5 100644
--- a/invokeai/backend/model_manager/taxonomy.py
+++ b/invokeai/backend/model_manager/taxonomy.py
@@ -87,6 +87,7 @@ class ModelVariantType(str, Enum):
 
 class ModelFormat(str, Enum):
     """Storage format of model."""
+
     OMI = "omi"
     Diffusers = "diffusers"
     Checkpoint = "checkpoint"
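
The boolean chain reformatted in LoRAOmiConfig.matches() above is the OMI LoRA detection predicate. As a standalone illustration, here is a minimal Python sketch of the same check. The metadata keys ("modelspec.sai_model_spec", "ot_branch", "modelspec.architecture") are taken from the diff; read_safetensors_metadata() is a hypothetical helper standing in for the repo's mod.metadata() accessor, relying only on the documented safetensors header layout (an 8-byte little-endian length, then a JSON header whose optional "__metadata__" entry holds string metadata).

import json
import struct


def read_safetensors_metadata(path: str) -> dict[str, str]:
    # Hypothetical helper: read the optional "__metadata__" block from a
    # .safetensors file header (8-byte little-endian header size, then JSON).
    with open(path, "rb") as f:
        (header_size,) = struct.unpack("<Q", f.read(8))
        header = json.loads(f.read(header_size))
    return header.get("__metadata__", {})


def is_omi_lora(metadata: dict[str, str]) -> bool:
    # Mirrors the reformatted predicate in LoRAOmiConfig.matches(). Like the
    # original, it assumes "modelspec.architecture" is present once the first
    # two checks pass, with a value such as "stable-diffusion-xl-v1-base/lora".
    return bool(
        metadata.get("modelspec.sai_model_spec")
        and metadata.get("ot_branch") == "omi_format"
        and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
    )

Typical use would be is_omi_lora(read_safetensors_metadata("my_lora.safetensors")); when it returns True, the loader path in the diff routes the state dict through convert_from_omi() before any base-specific key conversion.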