refactor(mm): remove legacy probe, new configs dir structure, update imports

This commit is contained in:
psychedelicious
2025-10-07 14:47:53 +11:00
parent 2043aa3807
commit 5adce0266b
67 changed files with 892 additions and 4910 deletions

View File

@@ -28,9 +28,8 @@ from invokeai.app.services.model_records import (
UnknownModelException,
)
from invokeai.app.util.suppress_output import SuppressOutput
from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType
from invokeai.backend.model_manager.config import (
AnyModelConfig,
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.main import (
Main_Checkpoint_SD1_Config,
Main_Checkpoint_SD2_Config,
Main_Checkpoint_SDXL_Config,
@@ -47,6 +46,7 @@ from invokeai.backend.model_manager.starter_models import (
StarterModelBundle,
StarterModelWithoutDependencies,
)
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])

View File

@@ -22,7 +22,7 @@ from invokeai.app.invocations.model import TransformerField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
from invokeai.backend.model_manager.config import BaseModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType
from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import CogView4ConditioningInfo

View File

@@ -13,8 +13,7 @@ from invokeai.app.invocations.model import (
VAEField,
)
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@invocation_output("cogview4_model_loader_output")

View File

@@ -20,9 +20,7 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
from invokeai.app.invocations.model import UNetField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.model_manager.config import Main_Config_Base
from invokeai.backend.model_manager.taxonomy import ModelVariantType
from invokeai.backend.model_manager.taxonomy import FluxVariantType, ModelType, ModelVariantType
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
@@ -182,10 +180,11 @@ class CreateGradientMaskInvocation(BaseInvocation):
if self.unet is not None and self.vae is not None and self.image is not None:
# all three fields must be present at the same time
main_model_config = context.models.get_config(self.unet.unet.key)
assert isinstance(main_model_config, Main_Config_Base)
if main_model_config.variant is ModelVariantType.Inpaint:
assert main_model_config.type is ModelType.Main
variant = getattr(main_model_config, "variant", None)
if variant is ModelVariantType.Inpaint or variant is FluxVariantType.DevFill:
mask = dilated_mask_tensor
vae_info: LoadedModel = context.models.load(self.vae.vae)
vae_info = context.models.load(self.vae.vae)
image = context.images.get_pil(self.image.image_name)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:

View File

@@ -39,7 +39,7 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelVariantType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.patches.layer_patcher import LayerPatcher

View File

@@ -16,9 +16,7 @@ from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import (
IPAdapter_Checkpoint_FLUX_Config,
)
from invokeai.backend.model_manager.configs.ip_adapter import IPAdapter_Checkpoint_FLUX_Config
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType

View File

@@ -14,9 +14,7 @@ from invokeai.app.util.t5_model_identifier import (
preprocess_t5_tokenizer_model_identifier,
)
from invokeai.backend.flux.util import get_flux_max_seq_length
from invokeai.backend.model_manager.config import (
Checkpoint_Config_Base,
)
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType

View File

@@ -24,9 +24,9 @@ from invokeai.app.invocations.primitives import ImageField
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
from invokeai.backend.model_manager import BaseModelType, ModelType
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.starter_models import siglip
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
from invokeai.backend.util.devices import TorchDevice

View File

@@ -17,7 +17,7 @@ from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.model_manager import ModelFormat
from invokeai.backend.model_manager.taxonomy import ModelFormat
from invokeai.backend.patches.layer_patcher import LayerPatcher
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

View File

@@ -12,7 +12,7 @@ from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux

View File

@@ -23,7 +23,7 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice

View File

@@ -11,8 +11,8 @@ from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import (
AnyModelConfig,
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.ip_adapter import (
IPAdapter_Checkpoint_Config_Base,
IPAdapter_InvokeAI_Config_Base,
)

View File

@@ -12,9 +12,7 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager.config import (
AnyModelConfig,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType

View File

@@ -23,7 +23,7 @@ from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
from invokeai.backend.model_manager import BaseModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType
from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo

View File

@@ -44,8 +44,8 @@ if TYPE_CHECKING:
SessionQueueItem,
SessionQueueStatus,
)
from invokeai.backend.model_manager import SubModelType
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.taxonomy import SubModelType
class EventServiceBase:

View File

@@ -16,8 +16,8 @@ from invokeai.app.services.session_queue.session_queue_common import (
)
from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput
from invokeai.app.util.misc import get_timestamp
from invokeai.backend.model_manager import SubModelType
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.taxonomy import SubModelType
if TYPE_CHECKING:
from invokeai.app.services.download.download_base import DownloadJob

View File

@@ -10,7 +10,7 @@ from typing_extensions import Annotated
from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob
from invokeai.app.services.model_records import ModelRecordChanges
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType

View File

@@ -35,10 +35,9 @@ from invokeai.app.services.model_install.model_install_common import (
)
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.backend.model_manager.config import (
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base
from invokeai.backend.model_manager.configs.factory import (
AnyModelConfig,
Checkpoint_Config_Base,
InvalidModelConfigException,
ModelConfigFactory,
)
from invokeai.backend.model_manager.metadata import (
@@ -532,7 +531,7 @@ class ModelInstallService(ModelInstallServiceBase):
x.content_type is not None and "text/html" in x.content_type for x in multifile_download_job.download_parts
):
install_job.set_error(
InvalidModelConfigException(
ValueError(
f"At least one file in {install_job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
)
)
@@ -602,7 +601,7 @@ class ModelInstallService(ModelInstallServiceBase):
return ModelConfigFactory.from_model_on_disk(
mod=model_path,
overrides=deepcopy(fields),
override_fields=deepcopy(fields),
hash_algo=hash_algo,
)

View File

@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Optional
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load import LoadedModel, LoadedModelWithoutConfig
from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType

View File

@@ -11,7 +11,7 @@ from torch import load as torch_load
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load import (
LoadedModel,
LoadedModelWithoutConfig,

View File

@@ -1,12 +1,10 @@
"""Initialization file for model manager service."""
from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase
from invokeai.backend.model_manager import AnyModelConfig
from invokeai.backend.model_manager.load import LoadedModel
__all__ = [
"ModelManagerServiceBase",
"ModelManagerService",
"AnyModelConfig",
"LoadedModel",
]

View File

@@ -12,12 +12,10 @@ from pydantic import BaseModel, Field
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
from invokeai.backend.model_manager.config import (
AnyModelConfig,
ControlAdapterDefaultSettings,
LoraModelDefaultSettings,
MainModelDefaultSettings,
)
from invokeai.backend.model_manager.configs.controlnet import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.lora import LoraModelDefaultSettings
from invokeai.backend.model_manager.configs.main import MainModelDefaultSettings
from invokeai.backend.model_manager.taxonomy import (
BaseModelType,
ClipVariantType,

View File

@@ -58,10 +58,7 @@ from invokeai.app.services.model_records.model_records_base import (
)
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from invokeai.backend.model_manager.config import (
AnyModelConfig,
ModelConfigFactory,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig, ModelConfigFactory
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
@@ -157,7 +154,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
record_as_dict[field_name] = getattr(changes, field_name)
# 3. create a new model config from the updated dict
record = ModelConfigFactory.make_config(record_as_dict)
record = ModelConfigFactory.from_dict(record_as_dict)
# If we get this far, the updated model config is valid, so we can save it to the database.
json_serialized = record.model_dump_json()
@@ -187,7 +184,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
SELECT config FROM models
WHERE id=?;
""",
(key,),
@@ -195,14 +192,14 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
model = ModelConfigFactory.from_dict(json.loads(rows[0]))
return model
def get_model_by_hash(self, hash: str) -> AnyModelConfig:
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
SELECT config FROM models
WHERE hash=?;
""",
(hash,),
@@ -210,7 +207,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
model = ModelConfigFactory.from_dict(json.loads(rows[0]))
return model
def exists(self, key: str) -> bool:
@@ -278,7 +275,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
SELECT config
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
@@ -291,7 +288,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
results: list[AnyModelConfig] = []
for row in result:
try:
model_config = ModelConfigFactory.make_config(json.loads(row[0]), timestamp=row[1])
model_config = ModelConfigFactory.from_dict(json.loads(row[0]))
except pydantic.ValidationError as e:
# We catch this error so that the app can still run if there are invalid model configs in the database.
# One reason that an invalid model config might be in the database is if someone had to rollback from a
@@ -315,12 +312,12 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
SELECT config FROM models
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
results = [ModelConfigFactory.from_dict(json.loads(x[0])) for x in cursor.fetchall()]
return results
def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
@@ -328,12 +325,12 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
SELECT config FROM models
WHERE hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
results = [ModelConfigFactory.from_dict(json.loads(x[0])) for x in cursor.fetchall()]
return results
def list_models(

View File

@@ -1,6 +1,6 @@
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
class ModelRelationshipsService(ModelRelationshipsServiceABC):

View File

@@ -19,10 +19,8 @@ from invokeai.app.services.model_records.model_records_base import UnknownModelE
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.step_callback import diffusion_step_callback
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Config_Base,
)
from invokeai.backend.model_manager.configs.base import Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState

View File

@@ -8,7 +8,7 @@ from pydantic import ValidationError
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
from invokeai.backend.model_manager.config import AnyModelConfigValidator
from invokeai.backend.model_manager.configs.factory import AnyModelConfigValidator
class NormalizeResult(NamedTuple):

View File

@@ -12,7 +12,7 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.backend.model_manager.config import AnyModelConfigValidator
from invokeai.backend.model_manager.configs.factory import AnyModelConfigValidator
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()

View File

@@ -1,10 +1,7 @@
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from invokeai.backend.model_manager.legacy_probe import CkptType
from typing import Any
def get_flux_in_channels_from_state_dict(state_dict: "CkptType") -> int | None:
def get_flux_in_channels_from_state_dict(state_dict: dict[str | int, Any]) -> int | None:
"""Gets the in channels from the state dict."""
# "Standard" FLUX models use "img_in.weight", but some community fine tunes use

View File

@@ -1,45 +0,0 @@
"""Re-export frequently-used symbols from the Model Manager backend."""
from invokeai.backend.model_manager.config import (
AnyModelConfig,
InvalidModelConfigException,
Config_Base,
ModelConfigFactory,
)
from invokeai.backend.model_manager.legacy_probe import ModelProbe
from invokeai.backend.model_manager.load import LoadedModel
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.taxonomy import (
AnyModel,
AnyVariant,
BaseModelType,
ClipVariantType,
ModelFormat,
ModelRepoVariant,
ModelSourceType,
ModelType,
ModelVariantType,
SchedulerPredictionType,
SubModelType,
)
__all__ = [
"AnyModelConfig",
"InvalidModelConfigException",
"LoadedModel",
"ModelConfigFactory",
"ModelProbe",
"ModelSearch",
"Config_Base",
"AnyModel",
"AnyVariant",
"BaseModelType",
"ClipVariantType",
"ModelFormat",
"ModelRepoVariant",
"ModelSourceType",
"ModelType",
"ModelVariantType",
"SchedulerPredictionType",
"SubModelType",
]

File diff suppressed because it is too large Load Diff

View File

@@ -191,8 +191,8 @@ class Config_Base(ABC, BaseModel):
else:
raise TypeError("Model config discriminator value must be computed from a dict or ModelConfigBase instance")
@abstractmethod
@classmethod
@abstractmethod
def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
"""Given the model on disk and any override fields, attempt to construct an instance of this config class.

View File

@@ -3,14 +3,13 @@ from typing import (
Self,
)
from pydantic import Field
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Any
from invokeai.backend.flux.controlnet.state_dict_utils import (
is_state_dict_instantx_controlnet,
is_state_dict_xlabs_controlnet,
)
from invokeai.backend.model_manager.config import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Config_Base, Diffusers_Config_Base
from invokeai.backend.model_manager.configs.identification_utils import (
NotAMatchError,
@@ -29,6 +28,42 @@ from invokeai.backend.model_manager.taxonomy import (
ModelType,
)
# Maps a substring of a model's name to the suggested default preprocessor node.
# NOTE: matching is first-substring-wins (see from_model_name), so more specific
# keys must appear before their prefixes (e.g. "lineart anime" before "lineart").
MODEL_NAME_TO_PREPROCESSOR = {
    "canny": "canny_image_processor",
    "mlsd": "mlsd_image_processor",
    "depth": "depth_anything_image_processor",
    "bae": "normalbae_image_processor",
    "normal": "normalbae_image_processor",
    "sketch": "pidi_image_processor",
    "scribble": "lineart_image_processor",
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",
    "pose": "dw_openpose_image_processor",
    "mediapipe": "mediapipe_face_processor",
    "pidi": "pidi_image_processor",
    "zoe": "zoe_depth_image_processor",
    "color": "color_map_image_processor",
}


class ControlAdapterDefaultSettings(BaseModel):
    """Default settings applied to control adapter models (currently just the suggested preprocessor)."""

    # This could be narrowed to controlnet processor nodes, but they change. Leaving this a string is safer.
    preprocessor: str | None
    model_config = ConfigDict(extra="forbid")

    @classmethod
    def from_model_name(cls, model_name: str) -> Self:
        """Infer default settings from *model_name*.

        Performs a case-insensitive substring match against MODEL_NAME_TO_PREPROCESSOR;
        the first matching key wins. Returns a config with preprocessor=None when
        nothing matches.
        """
        # Lowercase once up front — it is invariant across loop iterations.
        model_name_lower = model_name.lower()
        for name_fragment, preprocessor in MODEL_NAME_TO_PREPROCESSOR.items():
            if name_fragment in model_name_lower:
                return cls(preprocessor=preprocessor)
        return cls(preprocessor=None)
class ControlNet_Diffusers_Config_Base(Diffusers_Config_Base):
"""Model config for ControlNet models (diffusers version)."""

View File

@@ -14,6 +14,7 @@ from invokeai.backend.model_manager.configs.base import Config_Base
from invokeai.backend.model_manager.configs.clip_embed import CLIPEmbed_Diffusers_G_Config, CLIPEmbed_Diffusers_L_Config
from invokeai.backend.model_manager.configs.clip_vision import CLIPVision_Diffusers_Config
from invokeai.backend.model_manager.configs.controlnet import (
ControlAdapterDefaultSettings,
ControlNet_Checkpoint_FLUX_Config,
ControlNet_Checkpoint_SD1_Config,
ControlNet_Checkpoint_SD2_Config,
@@ -47,6 +48,7 @@ from invokeai.backend.model_manager.configs.lora import (
LoRA_LyCORIS_SDXL_Config,
LoRA_OMI_FLUX_Config,
LoRA_OMI_SDXL_Config,
LoraModelDefaultSettings,
)
from invokeai.backend.model_manager.configs.main import (
Main_BnBNF4_FLUX_Config,
@@ -67,6 +69,7 @@ from invokeai.backend.model_manager.configs.main import (
Main_ExternalAPI_Imagen3_Config,
Main_ExternalAPI_Imagen4_Config,
Main_GGUF_FLUX_Config,
MainModelDefaultSettings,
Video_ExternalAPI_Runway_Config,
Video_ExternalAPI_Veo3_Config,
)
@@ -332,9 +335,52 @@ class ModelConfigFactory:
matches.sort(key=sort_key)
logger.warning(
f"Multiple model config classes matched for model {mod.name}: {[type(m).__name__ for m in matches]}. Using {type(matches[0]).__name__}."
f"Multiple model config classes matched for model {mod.name}: {[type(m).__name__ for m in matches]}."
)
instance = matches[0]
logger.info(f"Model {mod.name} classified as {type(instance).__name__}")
# Now do any post-processing needed for specific model types/bases/etc.
match instance.type:
case ModelType.Main:
match instance.base:
case BaseModelType.StableDiffusion1:
instance.default_settings = MainModelDefaultSettings(width=512, height=512)
case BaseModelType.StableDiffusion2:
instance.default_settings = MainModelDefaultSettings(width=768, height=768)
case BaseModelType.StableDiffusionXL:
instance.default_settings = MainModelDefaultSettings(width=1024, height=1024)
case _:
pass
case ModelType.ControlNet | ModelType.T2IAdapter | ModelType.ControlLoRa:
instance.default_settings = ControlAdapterDefaultSettings.from_model_name(instance.name)
case ModelType.LoRA:
instance.default_settings = LoraModelDefaultSettings()
case _:
pass
return instance
# Maps a substring of a model's name to the suggested default preprocessor node.
# Order matters when keys overlap ("lineart anime" vs "lineart"): callers that do
# first-substring-match iteration will take the earlier entry.
# NOTE(review): this appears to duplicate MODEL_NAME_TO_PREPROCESSOR in
# configs/controlnet.py — consider importing the single definition instead.
MODEL_NAME_TO_PREPROCESSOR = {
    "canny": "canny_image_processor",
    "mlsd": "mlsd_image_processor",
    "depth": "depth_anything_image_processor",
    "bae": "normalbae_image_processor",
    "normal": "normalbae_image_processor",
    "sketch": "pidi_image_processor",
    "scribble": "lineart_image_processor",
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",
    "pose": "dw_openpose_image_processor",
    "mediapipe": "mediapipe_face_processor",
    "pidi": "pidi_image_processor",
    "zoe": "zoe_depth_image_processor",
    "color": "color_map_image_processor",
}

View File

@@ -9,10 +9,10 @@ from typing import (
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Any
from invokeai.backend.model_manager.config import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.base import (
Config_Base,
)
from invokeai.backend.model_manager.configs.controlnet import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.identification_utils import (
NotAMatchError,
raise_for_override_fields,

View File

@@ -685,8 +685,8 @@ class Video_Config_Base(ABC, BaseModel):
class Video_ExternalAPI_Veo3_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
base: Literal[BaseModelType.FluxKontext] = Field(default=BaseModelType.FluxKontext)
base: Literal[BaseModelType.Veo3] = Field(default=BaseModelType.Veo3)
class Video_ExternalAPI_Runway_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
base: Literal[BaseModelType.FluxKontext] = Field(default=BaseModelType.FluxKontext)
base: Literal[BaseModelType.Runway] = Field(default=BaseModelType.Runway)

View File

@@ -6,8 +6,8 @@ from typing import (
from pydantic import Field
from typing_extensions import Any
from invokeai.backend.model_manager.config import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base
from invokeai.backend.model_manager.configs.controlnet import ControlAdapterDefaultSettings
from invokeai.backend.model_manager.configs.identification_utils import (
NotAMatchError,
common_config_paths,

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ from typing import Any, Dict, Generator, Optional, Tuple
import torch
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType

View File

@@ -6,7 +6,8 @@ from pathlib import Path
from typing import Optional
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager.config import AnyModelConfig, Diffusers_Config_Base, InvalidModelConfigException
from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase
from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache, get_model_cache_key
@@ -50,7 +51,7 @@ class ModelLoader(ModelLoaderBase):
model_path = self._get_model_path(model_config)
if not model_path.exists():
raise InvalidModelConfigException(f"Files for model '{model_config.name}' not found at {model_path}")
raise FileNotFoundError(f"Files for model '{model_config.name}' not found at {model_path}")
with skip_torch_weight_init():
cache_record = self._load_and_cache(model_config, submodel_type)

View File

@@ -18,10 +18,8 @@ Use like this:
from abc import ABC, abstractmethod
from typing import Callable, Dict, Optional, Tuple, Type, TypeVar
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Config_Base,
)
from invokeai.backend.model_manager.configs.base import Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load import ModelLoaderBase
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType, SubModelType

View File

@@ -3,10 +3,8 @@ from typing import Optional
from transformers import CLIPVisionModelWithProjection
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Diffusers_Config_Base,
)
from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType

View File

@@ -3,11 +3,8 @@ from typing import Optional
import torch
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Checkpoint_Config_Base,
Diffusers_Config_Base,
)
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -5,10 +5,8 @@ from typing import Optional
from diffusers import ControlNetModel
from invokeai.backend.model_manager.config import (
AnyModelConfig,
ControlNet_Checkpoint_Config_Base,
)
from invokeai.backend.model_manager.configs.controlnet import ControlNet_Checkpoint_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -34,21 +34,22 @@ from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
from invokeai.backend.flux.util import get_flux_ae_params, get_flux_transformers_params
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Checkpoint_Config_Base,
CLIPEmbed_Diffusers_Config_Base,
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base
from invokeai.backend.model_manager.configs.clip_embed import CLIPEmbed_Diffusers_Config_Base
from invokeai.backend.model_manager.configs.controlnet import (
ControlNet_Checkpoint_Config_Base,
ControlNet_Diffusers_Config_Base,
FLUXRedux_Checkpoint_Config,
IPAdapter_Checkpoint_Config_Base,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.flux_redux import FLUXRedux_Checkpoint_Config
from invokeai.backend.model_manager.configs.ip_adapter import IPAdapter_Checkpoint_Config_Base
from invokeai.backend.model_manager.configs.main import (
Main_BnBNF4_FLUX_Config,
Main_Checkpoint_FLUX_Config,
Main_GGUF_FLUX_Config,
T5Encoder_BnBLLMint8_Config,
T5Encoder_T5Encoder_Config,
VAE_Checkpoint_Config_Base,
)
from invokeai.backend.model_manager.configs.t5_encoder import T5Encoder_BnBLLMint8_Config, T5Encoder_T5Encoder_Config
from invokeai.backend.model_manager.configs.vae import VAE_Checkpoint_Config_Base
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -8,7 +8,8 @@ from typing import Any, Optional
from diffusers.configuration_utils import ConfigMixin
from diffusers.models.modeling_utils import ModelMixin
from invokeai.backend.model_manager.config import AnyModelConfig, Diffusers_Config_Base, InvalidModelConfigException
from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import (
@@ -56,9 +57,7 @@ class GenericDiffusersLoader(ModelLoader):
module, class_name = config[submodel_type.value]
result = self._hf_definition_to_type(module=module, class_name=class_name)
except KeyError as e:
raise InvalidModelConfigException(
f'The "{submodel_type}" submodel is not available for this model.'
) from e
raise ValueError(f'The "{submodel_type}" submodel is not available for this model.') from e
else:
try:
config = self._load_diffusers_config(model_path, config_name="config.json")
@@ -67,9 +66,9 @@ class GenericDiffusersLoader(ModelLoader):
elif class_name := config.get("architectures"):
result = self._hf_definition_to_type(module="transformers", class_name=class_name[0])
else:
raise InvalidModelConfigException("Unable to decipher Load Class based on given config.json")
raise RuntimeError("Unable to decipher Load Class based on given config.json")
except KeyError as e:
raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e
raise ValueError("An expected config.json file is missing from this model.") from e
assert result is not None
return result

View File

@@ -7,7 +7,7 @@ from typing import Optional
import torch
from invokeai.backend.ip_adapter.ip_adapter import build_ip_adapter
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load import ModelLoader, ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType
from invokeai.backend.raw_model import RawModel

View File

@@ -3,9 +3,7 @@ from typing import Optional
from transformers import LlavaOnevisionForConditionalGeneration
from invokeai.backend.model_manager.config import (
AnyModelConfig,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType

View File

@@ -9,7 +9,7 @@ import torch
from safetensors.torch import load_file
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry

View File

@@ -5,7 +5,7 @@
from pathlib import Path
from typing import Optional
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -3,9 +3,7 @@ from typing import Optional
from transformers import SiglipVisionModel
from invokeai.backend.model_manager.config import (
AnyModelConfig,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType

View File

@@ -3,9 +3,7 @@ from typing import Optional
import torch
from invokeai.backend.model_manager.config import (
AnyModelConfig,
)
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType

View File

@@ -11,10 +11,9 @@ from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpain
StableDiffusionXLInpaintPipeline,
)
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Checkpoint_Config_Base,
Diffusers_Config_Base,
from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.main import (
Main_Checkpoint_SD1_Config,
Main_Checkpoint_SD2_Config,
Main_Checkpoint_SDXL_Config,

View File

@@ -4,7 +4,7 @@
from pathlib import Path
from typing import Optional
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -5,7 +5,8 @@ from typing import Optional
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from invokeai.backend.model_manager.config import AnyModelConfig, VAE_Checkpoint_Config_Base
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.configs.vae import VAE_Checkpoint_Config_Base
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
from invokeai.backend.model_manager.taxonomy import (

View File

@@ -8,7 +8,8 @@ from typing import Any, Dict, Optional, Set, Tuple
from PIL import Image
from invokeai.app.util.thumbnails import make_thumbnail
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
from invokeai.backend.model_manager.configs.factory import AnyModelConfig
from invokeai.backend.model_manager.taxonomy import ModelType
logger = logging.getLogger(__name__)

View File

@@ -7,7 +7,8 @@ import torch
from invokeai.app.services.model_manager import ModelManagerServiceBase
from invokeai.app.services.model_records import UnknownModelException
from invokeai.backend.model_manager import BaseModelType, LoadedModel, ModelType, SubModelType
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@pytest.fixture(scope="session")

View File

@@ -17,6 +17,7 @@ import { selectImg2imgStrengthConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelectedModelConfig } from 'services/api/hooks/useSelectedModelConfig';
import { isFluxFillMainModelModelConfig } from 'services/api/types';
const selectHasRasterLayersWithContent = createSelector(
selectActiveRasterLayerEntities,
@@ -46,11 +47,7 @@ export const ParamDenoisingStrength = memo(() => {
// Denoising strength does nothing if there are no raster layers w/ content
return true;
}
if (
selectedModelConfig?.type === 'main' &&
selectedModelConfig?.base === 'flux' &&
selectedModelConfig.variant === 'inpaint'
) {
if (selectedModelConfig && isFluxFillMainModelModelConfig(selectedModelConfig)) {
// Denoising strength is ignored by FLUX Fill, which is indicated by the variant being 'dev_fill'
return true;
}

View File

@@ -154,7 +154,7 @@ export const getControlLayerWarnings = (
warnings.push(WARNINGS.CONTROL_ADAPTER_INCOMPATIBLE_BASE_MODEL);
} else if (
model.base === 'flux' &&
model.variant === 'inpaint' &&
model.variant === 'dev_fill' &&
entity.controlAdapter.model.type === 'control_lora'
) {
// FLUX 'dev_fill' variants are FLUX Fill models - not compatible w/ Control LoRA

View File

@@ -56,18 +56,17 @@ export const ModelView = memo(({ modelConfig }: Props) => {
<ModelAttrView label={t('modelManager.modelFormat')} value={modelConfig.format} />
<ModelAttrView label={t('modelManager.path')} value={modelConfig.path} />
<ModelAttrView label={t('modelManager.fileSize')} value={filesize(modelConfig.file_size)} />
{modelConfig.type === 'main' && (
{modelConfig.type === 'main' && 'variant' in modelConfig && (
<ModelAttrView label={t('modelManager.variant')} value={modelConfig.variant} />
)}
{modelConfig.type === 'main' && modelConfig.format === 'diffusers' && modelConfig.repo_variant && (
<ModelAttrView label={t('modelManager.repoVariant')} value={modelConfig.repo_variant} />
)}
{modelConfig.type === 'main' && modelConfig.format === 'checkpoint' && (
<>
<ModelAttrView label={t('modelManager.pathToConfig')} value={modelConfig.config_path} />
<ModelAttrView label={t('modelManager.predictionType')} value={modelConfig.prediction_type} />
<ModelAttrView label={t('modelManager.upcastAttention')} value={`${modelConfig.upcast_attention}`} />
</>
<ModelAttrView label={t('modelManager.pathToConfig')} value={modelConfig.config_path} />
)}
{modelConfig.type === 'main' && modelConfig.format === 'checkpoint' && 'prediction_type' in modelConfig && (
<ModelAttrView label={t('modelManager.predictionType')} value={modelConfig.prediction_type} />
)}
{modelConfig.type === 'ip_adapter' && modelConfig.format === 'invokeai' && (
<ModelAttrView label={t('modelManager.imageEncoderModelId')} value={modelConfig.image_encoder_model_id} />

View File

@@ -660,6 +660,7 @@ describe('Graph', () => {
cover_image: null,
type: 'main',
trigger_phrases: null,
prediction_type: 'epsilon',
default_settings: {
vae: null,
vae_precision: null,
@@ -673,7 +674,6 @@ describe('Graph', () => {
variant: 'inpaint',
format: 'diffusers',
repo_variant: 'fp16',
submodels: null,
usage_info: null,
});
expect(field).toEqual({

View File

@@ -5,7 +5,7 @@ import type { CanvasControlLayerState, Rect } from 'features/controlLayers/store
import { getControlLayerWarnings } from 'features/controlLayers/store/validators';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { serializeError } from 'serialize-error';
import type { ImageDTO, Invocation, MainModelConfig } from 'services/api/types';
import type { FLUXModelConfig, ImageDTO, Invocation, MainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
const log = logger('system');
@@ -113,7 +113,7 @@ type AddControlLoRAArg = {
entities: CanvasControlLayerState[];
g: Graph;
rect: Rect;
model: MainModelConfig;
model: FLUXModelConfig;
denoise: Invocation<'flux_denoise'>;
};
@@ -129,7 +129,7 @@ export const addControlLoRA = async ({ manager, entities, g, rect, model, denois
return;
}
assert(model.variant !== 'inpaint', 'FLUX Control LoRA is not compatible with FLUX Fill.');
assert(model.variant !== 'dev_fill', 'FLUX Control LoRA is not compatible with FLUX Fill.');
assert(validControlLayers.length <= 1, 'Cannot add more than one FLUX control LoRA.');
const getImageDTOResult = await withResultAsync(() => {

View File

@@ -49,7 +49,7 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
assert(clipEmbedModel, 'No CLIP Embed model found in state');
assert(fluxVAE, 'No FLUX VAE model found in state');
const isFLUXFill = model.variant === 'inpaint';
const isFLUXFill = model.variant === 'dev_fill';
let guidance = baseGuidance;
if (isFLUXFill) {
// FLUX Fill doesn't work with Text to Image or Image to Image generation modes. Well, technically, it does, but

View File

@@ -236,7 +236,7 @@ export const getDenoisingStartAndEnd = (state: RootState): { denoising_start: nu
};
}
case 'flux': {
if (model.variant === 'inpaint') {
if (model.variant === 'dev_fill') {
// This is a FLUX Fill model - we always denoise fully
return {
denoising_start: 0,

File diff suppressed because it is too large Load Diff

View File

@@ -108,6 +108,7 @@ export const isVideoDTO = (dto: ImageDTO | VideoDTO): dto is VideoDTO => {
// Model Configs
export type AnyModelConfig = S['AnyModelConfig'];
export type MainModelConfig = Extract<S['AnyModelConfig'], { type: 'main' }>;
export type FLUXModelConfig = Extract<S['AnyModelConfig'], { type: 'main'; base: 'flux' }>;
export type ControlLoRAModelConfig = Extract<S['AnyModelConfig'], { type: 'control_lora' }>;
export type LoRAModelConfig = Extract<S['AnyModelConfig'], { type: 'lora' }>;
export type VAEModelConfig = Extract<S['AnyModelConfig'], { type: 'vae' }>;
@@ -134,6 +135,7 @@ type UnknownModelConfig = Extract<S['AnyModelConfig'], { type: 'unknown' }>;
export type FLUXKontextModelConfig = MainModelConfig;
export type ChatGPT4oModelConfig = ApiModelConfig;
export type Gemini2_5ModelConfig = ApiModelConfig;
type SubmodelDefinition = S['SubmodelDefinition'];
/**
* Checks if a list of submodels contains any that match a given variant or type
@@ -141,7 +143,7 @@ export type Gemini2_5ModelConfig = ApiModelConfig;
* @param checkStr The string to check against for variant or type
* @returns A boolean
*/
const checkSubmodel = (submodels: AnyModelConfig['submodels'], checkStr: string): boolean => {
const checkSubmodel = (submodels: Record<string, SubmodelDefinition>, checkStr: string): boolean => {
for (const submodel in submodels) {
if (
submodel &&
@@ -164,6 +166,7 @@ const checkSubmodels = (identifiers: string[], config: AnyModelConfig): boolean
return identifiers.every(
(identifier) =>
config.type === 'main' &&
'submodels' in config &&
config.submodels &&
(identifier in config.submodels || checkSubmodel(config.submodels, identifier))
);
@@ -332,7 +335,7 @@ export const isRefinerMainModelModelConfig = (config: AnyModelConfig): config is
};
export const isFluxFillMainModelModelConfig = (config: AnyModelConfig): config is MainModelConfig => {
return config.type === 'main' && config.base === 'flux' && config.variant === 'inpaint';
return config.type === 'main' && config.base === 'flux' && config.variant === 'dev_fill';
};
export const isTIModelConfig = (config: AnyModelConfig): config is MainModelConfig => {

View File

@@ -8,7 +8,7 @@ from typing import get_args
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
from invokeai.backend.model_manager import InvalidModelConfigException, ModelProbe
from invokeai.backend.model_manager.config import ModelConfigFactory
from invokeai.backend.model_manager.configs.factory import ModelConfigFactory
algos = ", ".join(set(get_args(HASHING_ALGORITHMS)))

View File

@@ -13,9 +13,9 @@ from torch import tensor
from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelRepoVariant, ModelType, ModelVariantType
from invokeai.backend.model_manager.config import (
AnyModelConfig,
Config_Base,
InvalidModelConfigException,
MainDiffusersConfig,
Config_Base,
ModelConfigFactory,
get_model_discriminator_value,
)