diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py
index 097aab67a9..be6da03e24 100644
--- a/invokeai/backend/model_manager/load/model_loaders/lora.py
+++ b/invokeai/backend/model_manager/load/model_loaders/lora.py
@@ -86,14 +86,14 @@ class LoRALoader(ModelLoader):
             state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
             model = lora_model_from_sd_state_dict(state_dict=state_dict)
         elif self._model_base == BaseModelType.Flux:
-            if config.format == ModelFormat.Diffusers:
+            if config.format in [ModelFormat.Diffusers, ModelFormat.OMI]:
                 # HACK(ryand): We set alpha=None for diffusers PEFT format models. These models are typically
                 # distributed as a single file without the associated metadata containing the alpha value. We chose
                 # alpha=None, because this is treated as alpha=rank internally in `LoRALayerBase.scale()`. alpha=rank
                 # is a popular choice. For example, in the diffusers training scripts:
                 # https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_flux.py#L1194
                 model = lora_model_from_flux_diffusers_state_dict(state_dict=state_dict, alpha=None)
-            elif config.format in [ModelFormat.LyCORIS, ModelFormat.OMI]:
+            elif config.format == ModelFormat.LyCORIS:
                 if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
                     model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
                 elif is_state_dict_likely_in_flux_onetrainer_format(state_dict=state_dict):
diff --git a/invokeai/backend/model_manager/omi.py b/invokeai/backend/model_manager/omi.py
index db12bcddc9..94b339672c 100644
--- a/invokeai/backend/model_manager/omi.py
+++ b/invokeai/backend/model_manager/omi.py
@@ -15,4 +15,6 @@ def convert_from_omi(weights_sd: StateDict, base: BaseModelType):
         BaseModelType.StableDiffusion1: convert_sd_lora_key_sets(),
         BaseModelType.StableDiffusion3: convert_sd3_lora_key_sets(),
     }[base]
-    return lora_util.__convert(weights_sd, keyset, "omi", "diffusers")
+
+    target = "diffusers"  # alternatively, "legacy_diffusers"
+    return lora_util.__convert(weights_sd, keyset, "omi", target)  # type: ignore
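
For context, a minimal sketch of the flow this diff enables for a Flux LoRA distributed in OMI format: the OMI keys are converted to the diffusers (PEFT) naming scheme, after which the existing diffusers loader path handles the state dict with alpha=None. The helper name `load_omi_flux_lora` and the import paths are assumptions for illustration; the actual call site is not part of this diff.

```python
# Sketch only -- illustrates the routing enabled by this diff for an OMI-format
# Flux LoRA. Import paths are assumed; the real call site is not shown here.
from invokeai.backend.model_manager.omi import convert_from_omi
from invokeai.backend.model_manager.taxonomy import BaseModelType  # path assumed
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
    lora_model_from_flux_diffusers_state_dict,  # path assumed
)


def load_omi_flux_lora(state_dict: dict):
    # OMI keys are first translated to the diffusers (PEFT) naming scheme by
    # convert_from_omi(), which is why the loader now treats ModelFormat.OMI
    # the same as ModelFormat.Diffusers.
    diffusers_sd = convert_from_omi(state_dict, BaseModelType.Flux)

    # alpha=None is interpreted as alpha == rank (see the HACK(ryand) note in
    # the loader above), since these files typically ship without alpha metadata.
    return lora_model_from_flux_diffusers_state_dict(state_dict=diffusers_sd, alpha=None)
```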