Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-16 05:08:03 -05:00

Compare commits: main...psyche/fea (20 commits)

7f3e5ced38
b9c7c6a282
73bed0d834
4070f26e8a
15e5c9aec9
01596340da
b18916d476
d6b72a316a
39bb60a778
82409d1ecd
57787e3dcf
c9dd1159a8
3f82c38e09
e348105800
a87fcfd4f7
7f9022e4aa
fa47e23643
bd893cf3f6
b68871a13f
3f3f941e09
@@ -108,6 +108,7 @@ class InvokeAIAppConfig(BaseSettings):
         remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.
         scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
         unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
+        allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
     """

     _root: Optional[Path] = PrivateAttr(default=None)
@@ -198,6 +199,7 @@ class InvokeAIAppConfig(BaseSettings):
     remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.")
     scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
     unsafe_disable_picklescan: bool = Field(default=False, description="UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.")
+    allow_unknown_models: bool = Field(default=True, description="Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.")

    # fmt: on
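A minimal sketch of what the new flag means at runtime. The helper below is hypothetical; only `get_config()` and the `allow_unknown_models` field come from the diff above:

    from invokeai.app.services.config.config_default import get_config

    def describe_unknown_model_policy() -> str:
        # Hypothetical helper: reports how the installer treats models that
        # cannot be identified, based on the new allow_unknown_models flag.
        if get_config().allow_unknown_models:
            return "unidentified models are installed and marked as 'unknown'"
        return "unidentified models are rejected during installation"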
@@ -546,11 +546,18 @@ class ModelInstallCompleteEvent(ModelEventBase):
     source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
     key: str = Field(description="Model config record key")
     total_bytes: Optional[int] = Field(description="Size of the model (may be None for installation of a local path)")
+    config: AnyModelConfig = Field(description="The installed model's config")

     @classmethod
     def build(cls, job: "ModelInstallJob") -> "ModelInstallCompleteEvent":
         assert job.config_out is not None
-        return cls(id=job.id, source=job.source, key=(job.config_out.key), total_bytes=job.total_bytes)
+        return cls(
+            id=job.id,
+            source=job.source,
+            key=(job.config_out.key),
+            total_bytes=job.total_bytes,
+            config=job.config_out,
+        )


 @payload_schema.register
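Because the event now carries the installed model's full config, a consumer no longer needs a follow-up record lookup by `key`. A sketch of a subscriber, assuming a hypothetical `on_model_install_complete` hook; the payload fields are the ones declared on the event class above:

    def on_model_install_complete(event: "ModelInstallCompleteEvent") -> None:
        # The new `config` field is the full AnyModelConfig record, so base,
        # type and format are available straight from the event payload.
        config = event.config
        print(f"installed {config.name}: base={config.base}, type={config.type}, format={config.format}")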
@@ -28,11 +28,12 @@ from abc import ABC, abstractmethod
 from enum import Enum
 from inspect import isabstract
 from pathlib import Path
-from typing import ClassVar, Literal, Optional, TypeAlias, Union
+from typing import ClassVar, Literal, Optional, Type, TypeAlias, Union

 from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter
 from typing_extensions import Annotated, Any, Dict

+from invokeai.app.services.config.config_default import get_config
 from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
@@ -55,6 +56,7 @@ from invokeai.backend.model_manager.util.model_util import lora_token_vector_length
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES

 logger = logging.getLogger(__name__)
+app_config = get_config()


 class InvalidModelConfigException(Exception):
@@ -109,6 +111,18 @@ class MatchSpeed(int, Enum):
     SLOW = 2


+class LegacyProbeMixin:
+    """Mixin for classes using the legacy probe for model classification."""
+
+    @classmethod
+    def matches(cls, *args, **kwargs):
+        raise NotImplementedError(f"Method 'matches' not implemented for {cls.__name__}")
+
+    @classmethod
+    def parse(cls, *args, **kwargs):
+        raise NotImplementedError(f"Method 'parse' not implemented for {cls.__name__}")
+
+
 class ModelConfigBase(ABC, BaseModel):
     """
     Abstract Base class for model configurations.
@@ -125,7 +139,7 @@ class ModelConfigBase(ABC, BaseModel):

     @staticmethod
     def json_schema_extra(schema: dict[str, Any]) -> None:
-        schema["required"].extend(["key", "type", "format"])
+        schema["required"].extend(["key", "base", "type", "format"])

     model_config = ConfigDict(validate_assignment=True, json_schema_extra=json_schema_extra)
@@ -152,14 +166,15 @@ class ModelConfigBase(ABC, BaseModel):
     )
     usage_info: Optional[str] = Field(default=None, description="Usage information for this model")

-    USING_LEGACY_PROBE: ClassVar[set] = set()
-    USING_CLASSIFY_API: ClassVar[set] = set()
+    USING_LEGACY_PROBE: ClassVar[set[Type["ModelConfigBase"]]] = set()
+    USING_CLASSIFY_API: ClassVar[set[Type["ModelConfigBase"]]] = set()
     _MATCH_SPEED: ClassVar[MatchSpeed] = MatchSpeed.MED

     def __init_subclass__(cls, **kwargs):
         super().__init_subclass__(**kwargs)
         if issubclass(cls, LegacyProbeMixin):
             ModelConfigBase.USING_LEGACY_PROBE.add(cls)
+        # Cannot use `elif isinstance(cls, UnknownModelConfig)` because UnknownModelConfig is not defined yet
         else:
             ModelConfigBase.USING_CLASSIFY_API.add(cls)
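The bookkeeping above leans on Python's `__init_subclass__` hook, which fires once for every subclass at class-definition time, so the two ClassVar registries fill themselves in as config classes are declared. A standalone sketch of the pattern (illustrative, not InvokeAI code):

    class Base:
        REGISTRY: set[type["Base"]] = set()

        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)
            # Runs when a subclass is defined, before any instance exists.
            Base.REGISTRY.add(cls)

    class A(Base): ...
    class B(Base): ...

    assert Base.REGISTRY == {A, B}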
@@ -170,7 +185,9 @@ class ModelConfigBase(ABC, BaseModel):
         return concrete

     @staticmethod
-    def classify(mod: str | Path | ModelOnDisk, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides):
+    def classify(
+        mod: str | Path | ModelOnDisk, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides
+    ) -> "AnyModelConfig":
         """
         Returns the best matching ModelConfig instance from a model's file/folder path.
         Raises InvalidModelConfigException if no valid configuration is found.
@@ -192,6 +209,13 @@ class ModelConfigBase(ABC, BaseModel):
             else:
                 return config_cls.from_model_on_disk(mod, **overrides)

+        if app_config.allow_unknown_models:
+            try:
+                return UnknownModelConfig.from_model_on_disk(mod, **overrides)
+            except Exception:
+                # Fall through to raising the exception below
+                pass
+
         raise InvalidModelConfigException("Unable to determine model type")

     @classmethod
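From the caller's side, the fall-through means an unrecognizable file can still yield a config instead of an error. A sketch with an illustrative path; `classify`, `InvalidModelConfigException` and `ModelType.Unknown` are from this changeset:

    try:
        config = ModelConfigBase.classify("/models/mystery_weights.safetensors")
    except InvalidModelConfigException:
        # Only reachable when allow_unknown_models is disabled, or when even
        # the UnknownModelConfig fallback failed to read the file.
        config = None

    if config is not None and config.type is ModelType.Unknown:
        print("installed, but flagged for manual identification in the Model Manager")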
@@ -240,32 +264,31 @@ class ModelConfigBase(ABC, BaseModel):
         cls.cast_overrides(overrides)
         fields.update(overrides)

-        type = fields.get("type") or cls.model_fields["type"].default
-        base = fields.get("base") or cls.model_fields["base"].default
-
         fields["path"] = mod.path.as_posix()
         fields["source"] = fields.get("source") or fields["path"]
         fields["source_type"] = fields.get("source_type") or ModelSourceType.Path
-        fields["name"] = name = fields.get("name") or mod.name
+        fields["name"] = fields.get("name") or mod.name
         fields["hash"] = fields.get("hash") or mod.hash()
         fields["key"] = fields.get("key") or uuid_string()
-        fields["description"] = fields.get("description") or f"{base.value} {type.value} model {name}"
+        fields["description"] = fields.get("description")
         fields["repo_variant"] = fields.get("repo_variant") or mod.repo_variant()
         fields["file_size"] = fields.get("file_size") or mod.size()

         return cls(**fields)


-class LegacyProbeMixin:
-    """Mixin for classes using the legacy probe for model classification."""
+class UnknownModelConfig(ModelConfigBase):
+    base: Literal[BaseModelType.Unknown] = BaseModelType.Unknown
+    type: Literal[ModelType.Unknown] = ModelType.Unknown
+    format: Literal[ModelFormat.Unknown] = ModelFormat.Unknown

     @classmethod
-    def matches(cls, *args, **kwargs):
-        raise NotImplementedError(f"Method 'matches' not implemented for {cls.__name__}")
+    def matches(cls, mod: ModelOnDisk) -> bool:
+        return False

     @classmethod
-    def parse(cls, *args, **kwargs):
-        raise NotImplementedError(f"Method 'parse' not implemented for {cls.__name__}")
+    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
+        return {}


 class CheckpointConfigBase(ABC, BaseModel):
@@ -353,7 +376,7 @@ class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):

         metadata = mod.metadata()
         return (
-            metadata.get("modelspec.sai_model_spec")
+            bool(metadata.get("modelspec.sai_model_spec"))
             and metadata.get("ot_branch") == "omi_format"
             and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
         )
@@ -751,6 +774,7 @@ AnyModelConfig = Annotated[
         Annotated[LlavaOnevisionConfig, LlavaOnevisionConfig.get_tag()],
         Annotated[ApiModelConfig, ApiModelConfig.get_tag()],
         Annotated[VideoApiModelConfig, VideoApiModelConfig.get_tag()],
+        Annotated[UnknownModelConfig, UnknownModelConfig.get_tag()],
     ],
     Discriminator(get_model_discriminator_value),
 ]
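At validation time, pydantic's `Discriminator` feeds each record through `get_model_discriminator_value`, and the union now has an `unknown` leg for records whose base/type/format are all `unknown`. A sketch under those assumptions; `load_config` and `record` are illustrative:

    from pydantic import TypeAdapter

    adapter = TypeAdapter(AnyModelConfig)

    def load_config(record: dict) -> AnyModelConfig:
        # `record` must carry a full stored config (key, name, path, hash,
        # source, and so on); for an unidentified model its base/type/format
        # are all "unknown", which the discriminator maps to UnknownModelConfig.
        return adapter.validate_python(record)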
@@ -33,6 +33,7 @@ class BaseModelType(str, Enum):
     FluxKontext = "flux-kontext"
     Veo3 = "veo3"
     Runway = "runway"
+    Unknown = "unknown"


 class ModelType(str, Enum):
@@ -55,6 +56,7 @@ class ModelType(str, Enum):
     FluxRedux = "flux_redux"
     LlavaOnevision = "llava_onevision"
     Video = "video"
+    Unknown = "unknown"


 class SubModelType(str, Enum):
@@ -107,6 +109,7 @@ class ModelFormat(str, Enum):
     BnbQuantizednf4b = "bnb_quantized_nf4b"
     GGUFQuantized = "gguf_quantized"
     Api = "api"
+    Unknown = "unknown"


 class SchedulerPredictionType(str, Enum):
@@ -914,6 +914,9 @@
     "hfTokenReset": "HF Token Reset",
     "urlUnauthorizedErrorMessage": "You may need to configure an API token to access this model.",
     "urlUnauthorizedErrorMessage2": "Learn how here.",
+    "unidentifiedModelTitle": "Unable to identify model",
+    "unidentifiedModelMessage": "We were unable to identify the type, base and/or format of the installed model. Try editing the model and selecting the appropriate settings for the model.",
+    "unidentifiedModelMessage2": "If you don't see the correct settings, or the model doesn't work after changing them, ask for help on <DiscordLink /> or create an issue on <GitHubIssuesLink />.",
     "imageEncoderModelId": "Image Encoder Model ID",
     "installedModelsCount": "{{installed}} of {{total}} models installed.",
     "includesNModels": "Includes {{n}} models and their dependencies.",
@@ -942,6 +945,7 @@
     "modelConverted": "Model Converted",
     "modelDeleted": "Model Deleted",
     "modelDeleteFailed": "Failed to delete model",
+    "modelFormat": "Model Format",
     "modelImageDeleted": "Model Image Deleted",
     "modelImageDeleteFailed": "Model Image Delete Failed",
     "modelImageUpdated": "Model Image Updated",
@@ -11,8 +11,8 @@ import {
   selectCanvasSlice,
 } from 'features/controlLayers/store/selectors';
 import { getEntityIdentifier } from 'features/controlLayers/store/types';
+import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/modelManagerV2/models';
 import { modelSelected } from 'features/parameters/store/actions';
-import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
 import { zParameterModel } from 'features/parameters/types/parameterSchemas';
 import { toast } from 'features/toast/toast';
 import { t } from 'i18next';
@@ -37,7 +37,7 @@ import type { Logger } from 'roarr';
 import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models';
 import type { AnyModelConfig } from 'services/api/types';
 import {
-  isCLIPEmbedModelConfig,
+  isCLIPEmbedModelConfigOrSubmodel,
   isControlLayerModelConfig,
   isControlNetModelConfig,
   isFluxReduxModelConfig,
@@ -48,7 +48,7 @@ import {
   isNonRefinerMainModelConfig,
   isRefinerMainModelModelConfig,
   isSpandrelImageToImageModelConfig,
-  isT5EncoderModelConfig,
+  isT5EncoderModelConfigOrSubmodel,
   isVideoModelConfig,
 } from 'services/api/types';
 import type { JsonObject } from 'type-fest';
@@ -418,7 +418,7 @@ const handleTileControlNetModel: ModelHandler = (models, state, dispatch, log) => {

 const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
   const selectedT5EncoderModel = state.params.t5EncoderModel;
-  const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
+  const t5EncoderModels = models.filter((m) => isT5EncoderModelConfigOrSubmodel(m));

   // If the currently selected model is available, we don't need to do anything
   if (selectedT5EncoderModel && t5EncoderModels.some((m) => m.key === selectedT5EncoderModel.key)) {
@@ -446,7 +446,7 @@ const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {

 const handleCLIPEmbedModels: ModelHandler = (models, state, dispatch, log) => {
   const selectedCLIPEmbedModel = state.params.clipEmbedModel;
-  const CLIPEmbedModels = models.filter((m) => isCLIPEmbedModelConfig(m));
+  const CLIPEmbedModels = models.filter((m) => isCLIPEmbedModelConfigOrSubmodel(m));

   // If the currently selected model is available, we don't need to do anything
   if (selectedCLIPEmbedModel && CLIPEmbedModels.some((m) => m.key === selectedCLIPEmbedModel.key)) {
@@ -17,7 +17,7 @@ import Konva from 'konva';
 import { atom, computed } from 'nanostores';
 import type { Logger } from 'roarr';
 import { serializeError } from 'serialize-error';
-import { buildSelectModelConfig } from 'services/api/hooks/modelsByType';
+import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
 import { isControlLayerModelConfig } from 'services/api/types';
 import stableHash from 'stable-hash';
 import type { Equals } from 'tsafe';
@@ -202,11 +202,19 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
   createInitialFilterConfig = (): FilterConfig => {
     if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
       // If the parent is a control layer adapter, we should check if the model has a default filter and set it if so
-      const selectModelConfig = buildSelectModelConfig(
-        this.parent.state.controlAdapter.model.key,
-        isControlLayerModelConfig
-      );
-      const modelConfig = this.manager.stateApi.runSelector(selectModelConfig);
+      const key = this.parent.state.controlAdapter.model.key;
+      const modelConfig = this.manager.stateApi.runSelector((state) => {
+        const { data } = selectModelConfigsQuery(state);
+        if (!data) {
+          return null;
+        }
+        return (
+          modelConfigsAdapterSelectors
+            .selectAll(data)
+            .filter(isControlLayerModelConfig)
+            .find((m) => m.key === key) ?? null
+        );
+      });
       // This always returns a filter
       const filter = getFilterForModel(modelConfig) ?? IMAGE_FILTERS.canny_edge_detection;
       return filter.buildDefaults();
@@ -13,8 +13,8 @@ import { selectBboxOverlay } from 'features/controlLayers/store/canvasSettingsSlice';
 import { selectModel } from 'features/controlLayers/store/paramsSlice';
 import { selectBbox } from 'features/controlLayers/store/selectors';
 import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types';
+import { API_BASE_MODELS } from 'features/modelManagerV2/models';
 import type { ModelIdentifierField } from 'features/nodes/types/common';
-import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import Konva from 'konva';
 import { atom } from 'nanostores';
 import type { Logger } from 'roarr';
@@ -35,8 +35,8 @@ import {
   getScaledBoundingBoxDimensions,
 } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
 import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify';
+import { API_BASE_MODELS } from 'features/modelManagerV2/models';
 import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
-import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
 import type { IRect } from 'konva/lib/types';
 import type { UndoableOptions } from 'redux-undo';
@@ -25,14 +25,14 @@ import {
 import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
 import {
   API_BASE_MODELS,
-  CLIP_SKIP_MAP,
   SUPPORTS_ASPECT_RATIO_BASE_MODELS,
   SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS,
   SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS,
   SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS,
   SUPPORTS_REF_IMAGES_BASE_MODELS,
   SUPPORTS_SEED_BASE_MODELS,
-} from 'features/parameters/types/constants';
+} from 'features/modelManagerV2/models';
+import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
 import type {
   ParameterCanvasCoherenceMode,
   ParameterCFGRescaleMultiplier,
@@ -6,8 +6,8 @@ import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
 import type { GroupStatusMap } from 'common/components/Picker/Picker';
 import { loraAdded, selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
 import { selectBase } from 'features/controlLayers/store/paramsSlice';
+import { API_BASE_MODELS } from 'features/modelManagerV2/models';
 import { ModelPicker } from 'features/parameters/components/ModelPicker';
-import { API_BASE_MODELS } from 'features/parameters/types/constants';
 import { memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { useLoRAModels } from 'services/api/hooks/modelsByType';
@@ -49,7 +49,7 @@ import {
   zVideoDuration,
   zVideoResolution,
 } from 'features/controlLayers/store/types';
-import type { ModelIdentifierField } from 'features/nodes/types/common';
+import type { ModelIdentifierField, ModelType } from 'features/nodes/types/common';
 import { zModelIdentifierField } from 'features/nodes/types/common';
 import { zModelIdentifier } from 'features/nodes/types/v2/common';
 import { modelSelected } from 'features/parameters/store/actions';
@@ -108,7 +108,7 @@ import { useCallback, useEffect, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { imagesApi } from 'services/api/endpoints/images';
 import { modelsApi } from 'services/api/endpoints/models';
-import type { AnyModelConfig, ModelType } from 'services/api/types';
+import type { AnyModelConfig } from 'services/api/types';
 import { assert } from 'tsafe';
 import z from 'zod';
invokeai/frontend/web/src/features/modelManagerV2/models.ts (new file, 298 lines)

@@ -0,0 +1,298 @@
+import type { BaseModelType, ModelFormat, ModelType, ModelVariantType } from 'features/nodes/types/common';
+import type { AnyModelConfig } from 'services/api/types';
+import {
+  isCLIPEmbedModelConfig,
+  isCLIPVisionModelConfig,
+  isControlLoRAModelConfig,
+  isControlNetModelConfig,
+  isFluxReduxModelConfig,
+  isIPAdapterModelConfig,
+  isLLaVAModelConfig,
+  isLoRAModelConfig,
+  isNonRefinerMainModelConfig,
+  isRefinerMainModelModelConfig,
+  isSigLipModelConfig,
+  isSpandrelImageToImageModelConfig,
+  isT2IAdapterModelConfig,
+  isT5EncoderModelConfig,
+  isTIModelConfig,
+  isUnknownModelConfig,
+  isVAEModelConfig,
+  isVideoModelConfig,
+} from 'services/api/types';
+import { objectEntries } from 'tsafe';
+
+import type { FilterableModelType } from './store/modelManagerV2Slice';
+
+export type ModelCategoryData = {
+  category: FilterableModelType;
+  i18nKey: string;
+  filter: (config: AnyModelConfig) => boolean;
+};
+
+export const MODEL_CATEGORIES: Record<FilterableModelType, ModelCategoryData> = {
+  unknown: {
+    category: 'unknown',
+    i18nKey: 'common.unknown',
+    filter: isUnknownModelConfig,
+  },
+  main: {
+    category: 'main',
+    i18nKey: 'modelManager.main',
+    filter: isNonRefinerMainModelConfig,
+  },
+  refiner: {
+    category: 'refiner',
+    i18nKey: 'sdxl.refiner',
+    filter: isRefinerMainModelModelConfig,
+  },
+  lora: {
+    category: 'lora',
+    i18nKey: 'modelManager.loraModels',
+    filter: isLoRAModelConfig,
+  },
+  embedding: {
+    category: 'embedding',
+    i18nKey: 'modelManager.textualInversions',
+    filter: isTIModelConfig,
+  },
+  controlnet: {
+    category: 'controlnet',
+    i18nKey: 'ControlNet',
+    filter: isControlNetModelConfig,
+  },
+  t2i_adapter: {
+    category: 't2i_adapter',
+    i18nKey: 'common.t2iAdapter',
+    filter: isT2IAdapterModelConfig,
+  },
+  t5_encoder: {
+    category: 't5_encoder',
+    i18nKey: 'modelManager.t5Encoder',
+    filter: isT5EncoderModelConfig,
+  },
+  control_lora: {
+    category: 'control_lora',
+    i18nKey: 'modelManager.controlLora',
+    filter: isControlLoRAModelConfig,
+  },
+  clip_embed: {
+    category: 'clip_embed',
+    i18nKey: 'modelManager.clipEmbed',
+    filter: isCLIPEmbedModelConfig,
+  },
+  spandrel_image_to_image: {
+    category: 'spandrel_image_to_image',
+    i18nKey: 'modelManager.spandrelImageToImage',
+    filter: isSpandrelImageToImageModelConfig,
+  },
+  ip_adapter: {
+    category: 'ip_adapter',
+    i18nKey: 'common.ipAdapter',
+    filter: isIPAdapterModelConfig,
+  },
+  vae: {
+    category: 'vae',
+    i18nKey: 'VAE',
+    filter: isVAEModelConfig,
+  },
+  clip_vision: {
+    category: 'clip_vision',
+    i18nKey: 'CLIP Vision',
+    filter: isCLIPVisionModelConfig,
+  },
+  siglip: {
+    category: 'siglip',
+    i18nKey: 'modelManager.sigLip',
+    filter: isSigLipModelConfig,
+  },
+  flux_redux: {
+    category: 'flux_redux',
+    i18nKey: 'modelManager.fluxRedux',
+    filter: isFluxReduxModelConfig,
+  },
+  llava_onevision: {
+    category: 'llava_onevision',
+    i18nKey: 'modelManager.llavaOnevision',
+    filter: isLLaVAModelConfig,
+  },
+  video: {
+    category: 'video',
+    i18nKey: 'Video',
+    filter: isVideoModelConfig,
+  },
+};
+
+export const MODEL_CATEGORIES_AS_LIST = objectEntries(MODEL_CATEGORIES).map(([category, { i18nKey, filter }]) => ({
+  category,
+  i18nKey,
+  filter,
+}));
+
+/**
+ * Mapping of model base to its color
+ */
+export const MODEL_BASE_TO_COLOR: Record<BaseModelType, string> = {
+  any: 'base',
+  'sd-1': 'green',
+  'sd-2': 'teal',
+  'sd-3': 'purple',
+  sdxl: 'invokeBlue',
+  'sdxl-refiner': 'invokeBlue',
+  flux: 'gold',
+  cogview4: 'red',
+  imagen3: 'pink',
+  imagen4: 'pink',
+  'chatgpt-4o': 'pink',
+  'flux-kontext': 'pink',
+  'gemini-2.5': 'pink',
+  veo3: 'purple',
+  runway: 'green',
+  unknown: 'red',
+};
+
+/**
+ * Mapping of model type to human readable name
+ */
+export const MODEL_TYPE_TO_LONG_NAME: Record<ModelType, string> = {
+  main: 'Main',
+  vae: 'VAE',
+  lora: 'LoRA',
+  llava_onevision: 'LLaVA OneVision',
+  control_lora: 'ControlLoRA',
+  controlnet: 'ControlNet',
+  t2i_adapter: 'T2I Adapter',
+  ip_adapter: 'IP Adapter',
+  embedding: 'Embedding',
+  onnx: 'ONNX',
+  clip_vision: 'CLIP Vision',
+  spandrel_image_to_image: 'Spandrel (Image to Image)',
+  t5_encoder: 'T5 Encoder',
+  clip_embed: 'CLIP Embed',
+  siglip: 'SigLIP',
+  flux_redux: 'FLUX Redux',
+  video: 'Video',
+  unknown: 'Unknown',
+};
+
+/**
+ * Mapping of model base to human readable name
+ */
+export const MODEL_BASE_TO_LONG_NAME: Record<BaseModelType, string> = {
+  any: 'Any',
+  'sd-1': 'Stable Diffusion 1.x',
+  'sd-2': 'Stable Diffusion 2.x',
+  'sd-3': 'Stable Diffusion 3.x',
+  sdxl: 'Stable Diffusion XL',
+  'sdxl-refiner': 'Stable Diffusion XL Refiner',
+  flux: 'FLUX',
+  cogview4: 'CogView4',
+  imagen3: 'Imagen3',
+  imagen4: 'Imagen4',
+  'chatgpt-4o': 'ChatGPT 4o',
+  'flux-kontext': 'Flux Kontext',
+  'gemini-2.5': 'Gemini 2.5',
+  veo3: 'Veo3',
+  runway: 'Runway',
+  unknown: 'Unknown',
+};
+
+/**
+ * Mapping of model base to short human readable name
+ */
+export const MODEL_BASE_TO_SHORT_NAME: Record<BaseModelType, string> = {
+  any: 'Any',
+  'sd-1': 'SD1.X',
+  'sd-2': 'SD2.X',
+  'sd-3': 'SD3.X',
+  sdxl: 'SDXL',
+  'sdxl-refiner': 'SDXLR',
+  flux: 'FLUX',
+  cogview4: 'CogView4',
+  imagen3: 'Imagen3',
+  imagen4: 'Imagen4',
+  'chatgpt-4o': 'ChatGPT 4o',
+  'flux-kontext': 'Flux Kontext',
+  'gemini-2.5': 'Gemini 2.5',
+  veo3: 'Veo3',
+  runway: 'Runway',
+  unknown: 'Unknown',
+};
+
+export const MODEL_VARIANT_TO_LONG_NAME: Record<ModelVariantType, string> = {
+  normal: 'Normal',
+  inpaint: 'Inpaint',
+  depth: 'Depth',
+};
+
+export const MODEL_FORMAT_TO_LONG_NAME: Record<ModelFormat, string> = {
+  omi: 'OMI',
+  diffusers: 'Diffusers',
+  checkpoint: 'Checkpoint',
+  lycoris: 'LyCORIS',
+  onnx: 'ONNX',
+  olive: 'Olive',
+  embedding_file: 'Embedding (file)',
+  embedding_folder: 'Embedding (folder)',
+  invokeai: 'InvokeAI',
+  t5_encoder: 'T5 Encoder',
+  bnb_quantized_int8b: 'BNB Quantized (int8b)',
+  bnb_quantized_nf4b: 'BNB Quantized (nf4b)',
+  gguf_quantized: 'GGUF Quantized',
+  api: 'API',
+  unknown: 'Unknown',
+};
+
+/**
+ * List of base models that make API requests
+ */
+export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];
+
+export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4'];
+
+export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3'];
+
+export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [
+  'sd-1',
+  'sdxl',
+  'flux',
+  'flux-kontext',
+  'chatgpt-4o',
+  'gemini-2.5',
+];
+
+export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
+  'sd-1',
+  'sd-2',
+  'sdxl',
+  'cogview4',
+  'sd-3',
+  'imagen3',
+  'imagen4',
+];
+
+export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [
+  'sd-1',
+  'sd-2',
+  'sd-3',
+  'sdxl',
+  'flux',
+  'cogview4',
+];
+
+export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [
+  'sd-1',
+  'sd-2',
+  'sd-3',
+  'sdxl',
+  'flux',
+  'cogview4',
+  'imagen3',
+  'imagen4',
+  'flux-kontext',
+  'chatgpt-4o',
+];
+
+export const VIDEO_BASE_MODELS = ['veo3', 'runway'];
+
+export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway'];
@@ -1,34 +1,16 @@
 import { Badge } from '@invoke-ai/ui-library';
-import { MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
+import { MODEL_BASE_TO_COLOR, MODEL_BASE_TO_SHORT_NAME } from 'features/modelManagerV2/models';
+import type { BaseModelType } from 'features/nodes/types/common';
 import { memo } from 'react';
-import type { BaseModelType } from 'services/api/types';

 type Props = {
   base: BaseModelType;
 };

-export const BASE_COLOR_MAP: Record<BaseModelType, string> = {
-  any: 'base',
-  'sd-1': 'green',
-  'sd-2': 'teal',
-  'sd-3': 'purple',
-  sdxl: 'invokeBlue',
-  'sdxl-refiner': 'invokeBlue',
-  flux: 'gold',
-  cogview4: 'red',
-  imagen3: 'pink',
-  imagen4: 'pink',
-  'chatgpt-4o': 'pink',
-  'flux-kontext': 'pink',
-  'gemini-2.5': 'pink',
-  veo3: 'purple',
-  runway: 'green',
-};
-
 const ModelBaseBadge = ({ base }: Props) => {
   return (
-    <Badge flexGrow={0} flexShrink={0} colorScheme={BASE_COLOR_MAP[base]} variant="subtle" h="min-content">
-      {MODEL_TYPE_SHORT_MAP[base]}
+    <Badge flexGrow={0} flexShrink={0} colorScheme={MODEL_BASE_TO_COLOR[base]} variant="subtle" h="min-content">
+      {MODEL_BASE_TO_SHORT_NAME[base]}
     </Badge>
   );
 };
@@ -19,6 +19,7 @@ const FORMAT_NAME_MAP: Record<AnyModelConfig['format'], string> = {
   gguf_quantized: 'gguf',
   api: 'api',
   omi: 'omi',
+  unknown: 'unknown',
 };

 const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = {
@@ -34,6 +35,7 @@ const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = {
   bnb_quantized_nf4b: 'base',
   gguf_quantized: 'base',
   api: 'base',
+  unknown: 'red',
 };

 const ModelFormatBadge = ({ format }: Props) => {
@@ -1,6 +1,8 @@
 import { Flex, Text } from '@invoke-ai/ui-library';
+import { logger } from 'app/logging/logger';
 import { useAppSelector } from 'app/store/storeHooks';
 import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
+import { MODEL_CATEGORIES_AS_LIST } from 'features/modelManagerV2/models';
 import {
   type FilterableModelType,
   selectFilteredModelType,
@@ -8,274 +10,50 @@ import {
 } from 'features/modelManagerV2/store/modelManagerV2Slice';
 import { memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import {
-  useCLIPEmbedModels,
-  useCLIPVisionModels,
-  useControlLoRAModel,
-  useControlNetModels,
-  useEmbeddingModels,
-  useFluxReduxModels,
-  useIPAdapterModels,
-  useLLaVAModels,
-  useLoRAModels,
-  useMainModels,
-  useRefinerModels,
-  useSigLipModels,
-  useSpandrelImageToImageModels,
-  useT2IAdapterModels,
-  useT5EncoderModels,
-  useVAEModels,
-} from 'services/api/hooks/modelsByType';
+import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/api/endpoints/models';
+import type { AnyModelConfig } from 'services/api/types';

 import { FetchingModelsLoader } from './FetchingModelsLoader';
 import { ModelListWrapper } from './ModelListWrapper';

+const log = logger('models');
+
 const ModelList = () => {
   const filteredModelType = useAppSelector(selectFilteredModelType);
   const searchTerm = useAppSelector(selectSearchTerm);
   const { t } = useTranslation();

-  const [mainModels, { isLoading: isLoadingMainModels }] = useMainModels();
-  const filteredMainModels = useMemo(
-    () => modelsFilter(mainModels, searchTerm, filteredModelType),
-    [mainModels, searchTerm, filteredModelType]
-  );
+  const { data, isLoading } = useGetModelConfigsQuery();

-  const [refinerModels, { isLoading: isLoadingRefinerModels }] = useRefinerModels();
-  const filteredRefinerModels = useMemo(
-    () => modelsFilter(refinerModels, searchTerm, filteredModelType),
-    [refinerModels, searchTerm, filteredModelType]
-  );
-
-  const [loraModels, { isLoading: isLoadingLoRAModels }] = useLoRAModels();
-  const filteredLoRAModels = useMemo(
-    () => modelsFilter(loraModels, searchTerm, filteredModelType),
-    [loraModels, searchTerm, filteredModelType]
-  );
-
-  const [embeddingModels, { isLoading: isLoadingEmbeddingModels }] = useEmbeddingModels();
-  const filteredEmbeddingModels = useMemo(
-    () => modelsFilter(embeddingModels, searchTerm, filteredModelType),
-    [embeddingModels, searchTerm, filteredModelType]
-  );
-
-  const [controlNetModels, { isLoading: isLoadingControlNetModels }] = useControlNetModels();
-  const filteredControlNetModels = useMemo(
-    () => modelsFilter(controlNetModels, searchTerm, filteredModelType),
-    [controlNetModels, searchTerm, filteredModelType]
-  );
-
-  const [t2iAdapterModels, { isLoading: isLoadingT2IAdapterModels }] = useT2IAdapterModels();
-  const filteredT2IAdapterModels = useMemo(
-    () => modelsFilter(t2iAdapterModels, searchTerm, filteredModelType),
-    [t2iAdapterModels, searchTerm, filteredModelType]
-  );
-
-  const [ipAdapterModels, { isLoading: isLoadingIPAdapterModels }] = useIPAdapterModels();
-  const filteredIPAdapterModels = useMemo(
-    () => modelsFilter(ipAdapterModels, searchTerm, filteredModelType),
-    [ipAdapterModels, searchTerm, filteredModelType]
-  );
-
-  const [clipVisionModels, { isLoading: isLoadingCLIPVisionModels }] = useCLIPVisionModels();
-  const filteredCLIPVisionModels = useMemo(
-    () => modelsFilter(clipVisionModels, searchTerm, filteredModelType),
-    [clipVisionModels, searchTerm, filteredModelType]
-  );
-
-  const [vaeModels, { isLoading: isLoadingVAEModels }] = useVAEModels({ excludeSubmodels: true });
-  const filteredVAEModels = useMemo(
-    () => modelsFilter(vaeModels, searchTerm, filteredModelType),
-    [vaeModels, searchTerm, filteredModelType]
-  );
-
-  const [t5EncoderModels, { isLoading: isLoadingT5EncoderModels }] = useT5EncoderModels({ excludeSubmodels: true });
-  const filteredT5EncoderModels = useMemo(
-    () => modelsFilter(t5EncoderModels, searchTerm, filteredModelType),
-    [t5EncoderModels, searchTerm, filteredModelType]
-  );
-
-  const [controlLoRAModels, { isLoading: isLoadingControlLoRAModels }] = useControlLoRAModel();
-  const filteredControlLoRAModels = useMemo(
-    () => modelsFilter(controlLoRAModels, searchTerm, filteredModelType),
-    [controlLoRAModels, searchTerm, filteredModelType]
-  );
-
-  const [clipEmbedModels, { isLoading: isLoadingClipEmbedModels }] = useCLIPEmbedModels({ excludeSubmodels: true });
-  const filteredClipEmbedModels = useMemo(
-    () => modelsFilter(clipEmbedModels, searchTerm, filteredModelType),
-    [clipEmbedModels, searchTerm, filteredModelType]
-  );
-
-  const [spandrelImageToImageModels, { isLoading: isLoadingSpandrelImageToImageModels }] =
-    useSpandrelImageToImageModels();
-  const filteredSpandrelImageToImageModels = useMemo(
-    () => modelsFilter(spandrelImageToImageModels, searchTerm, filteredModelType),
-    [spandrelImageToImageModels, searchTerm, filteredModelType]
-  );
-
-  const [sigLipModels, { isLoading: isLoadingSigLipModels }] = useSigLipModels();
-  const filteredSigLipModels = useMemo(
-    () => modelsFilter(sigLipModels, searchTerm, filteredModelType),
-    [sigLipModels, searchTerm, filteredModelType]
-  );
-
-  const [fluxReduxModels, { isLoading: isLoadingFluxReduxModels }] = useFluxReduxModels();
-  const filteredFluxReduxModels = useMemo(
-    () => modelsFilter(fluxReduxModels, searchTerm, filteredModelType),
-    [fluxReduxModels, searchTerm, filteredModelType]
-  );
-
-  const [llavaOneVisionModels, { isLoading: isLoadingLlavaOneVisionModels }] = useLLaVAModels();
-  const filteredLlavaOneVisionModels = useMemo(
-    () => modelsFilter(llavaOneVisionModels, searchTerm, filteredModelType),
-    [llavaOneVisionModels, searchTerm, filteredModelType]
-  );
-
-  const totalFilteredModels = useMemo(() => {
-    return (
-      filteredMainModels.length +
-      filteredRefinerModels.length +
-      filteredLoRAModels.length +
-      filteredEmbeddingModels.length +
-      filteredControlNetModels.length +
-      filteredT2IAdapterModels.length +
-      filteredIPAdapterModels.length +
-      filteredCLIPVisionModels.length +
-      filteredVAEModels.length +
-      filteredSpandrelImageToImageModels.length +
-      filteredSigLipModels.length +
-      filteredFluxReduxModels.length +
-      t5EncoderModels.length +
-      clipEmbedModels.length +
-      controlLoRAModels.length
-    );
-  }, [
-    filteredControlNetModels.length,
-    filteredEmbeddingModels.length,
-    filteredIPAdapterModels.length,
-    filteredCLIPVisionModels.length,
-    filteredLoRAModels.length,
-    filteredMainModels.length,
-    filteredRefinerModels.length,
-    filteredT2IAdapterModels.length,
-    filteredVAEModels.length,
-    filteredSpandrelImageToImageModels.length,
-    filteredSigLipModels.length,
-    filteredFluxReduxModels.length,
-    t5EncoderModels.length,
-    clipEmbedModels.length,
-    controlLoRAModels.length,
-  ]);
+  const models = useMemo(() => {
+    const modelConfigs = modelConfigsAdapterSelectors.selectAll(data ?? { ids: [], entities: {} });
+    const baseFilteredModelConfigs = modelsFilter(modelConfigs, searchTerm, filteredModelType);
+    const byCategory: { i18nKey: string; configs: AnyModelConfig[] }[] = [];
+    const total = baseFilteredModelConfigs.length;
+    let renderedTotal = 0;
+    for (const { i18nKey, filter } of MODEL_CATEGORIES_AS_LIST) {
+      const configs = baseFilteredModelConfigs.filter(filter);
+      renderedTotal += configs.length;
+      byCategory.push({ i18nKey, configs });
+    }
+    if (renderedTotal !== total) {
+      const ctx = { total, renderedTotal, difference: total - renderedTotal };
+      log.warn(
+        ctx,
+        `ModelList: Not all models were categorized - ensure all possible models are covered in MODEL_CATEGORIES`
+      );
+    }
+    return { total, byCategory };
+  }, [data, filteredModelType, searchTerm]);

   return (
     <ScrollableContent>
       <Flex flexDirection="column" w="full" h="full" gap={4}>
-        {/* Main Model List */}
-        {isLoadingMainModels && <FetchingModelsLoader loadingMessage="Loading Main Models..." />}
-        {!isLoadingMainModels && filteredMainModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.main')} modelList={filteredMainModels} key="main" />
-        )}
-        {/* Refiner Model List */}
-        {isLoadingRefinerModels && <FetchingModelsLoader loadingMessage="Loading Refiner Models..." />}
-        {!isLoadingRefinerModels && filteredRefinerModels.length > 0 && (
-          <ModelListWrapper title={t('sdxl.refiner')} modelList={filteredRefinerModels} key="refiner" />
-        )}
-        {/* LoRAs List */}
-        {isLoadingLoRAModels && <FetchingModelsLoader loadingMessage="Loading LoRAs..." />}
-        {!isLoadingLoRAModels && filteredLoRAModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.loraModels')} modelList={filteredLoRAModels} key="loras" />
-        )}
-
-        {/* TI List */}
-        {isLoadingEmbeddingModels && <FetchingModelsLoader loadingMessage="Loading Textual Inversions..." />}
-        {!isLoadingEmbeddingModels && filteredEmbeddingModels.length > 0 && (
-          <ModelListWrapper
-            title={t('modelManager.textualInversions')}
-            modelList={filteredEmbeddingModels}
-            key="textual-inversions"
-          />
-        )}
-
-        {/* VAE List */}
-        {isLoadingVAEModels && <FetchingModelsLoader loadingMessage="Loading VAEs..." />}
-        {!isLoadingVAEModels && filteredVAEModels.length > 0 && (
-          <ModelListWrapper title="VAE" modelList={filteredVAEModels} key="vae" />
-        )}
-
-        {/* Controlnet List */}
-        {isLoadingControlNetModels && <FetchingModelsLoader loadingMessage="Loading ControlNets..." />}
-        {!isLoadingControlNetModels && filteredControlNetModels.length > 0 && (
-          <ModelListWrapper title="ControlNet" modelList={filteredControlNetModels} key="controlnets" />
-        )}
-        {/* IP Adapter List */}
-        {isLoadingIPAdapterModels && <FetchingModelsLoader loadingMessage="Loading IP Adapters..." />}
-        {!isLoadingIPAdapterModels && filteredIPAdapterModels.length > 0 && (
-          <ModelListWrapper title={t('common.ipAdapter')} modelList={filteredIPAdapterModels} key="ip-adapters" />
-        )}
-        {/* CLIP Vision List */}
-        {isLoadingCLIPVisionModels && <FetchingModelsLoader loadingMessage="Loading CLIP Vision Models..." />}
-        {!isLoadingCLIPVisionModels && filteredCLIPVisionModels.length > 0 && (
-          <ModelListWrapper title="CLIP Vision" modelList={filteredCLIPVisionModels} key="clip-vision" />
-        )}
-        {/* T2I Adapters List */}
-        {isLoadingT2IAdapterModels && <FetchingModelsLoader loadingMessage="Loading T2I Adapters..." />}
-        {!isLoadingT2IAdapterModels && filteredT2IAdapterModels.length > 0 && (
-          <ModelListWrapper title={t('common.t2iAdapter')} modelList={filteredT2IAdapterModels} key="t2i-adapters" />
-        )}
-        {/* T5 Encoders List */}
-        {isLoadingT5EncoderModels && <FetchingModelsLoader loadingMessage="Loading T5 Encoder Models..." />}
-        {!isLoadingT5EncoderModels && filteredT5EncoderModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.t5Encoder')} modelList={filteredT5EncoderModels} key="t5-encoder" />
-        )}
-        {/* Control Lora List */}
-        {isLoadingControlLoRAModels && <FetchingModelsLoader loadingMessage="Loading Control Loras..." />}
-        {!isLoadingControlLoRAModels && filteredControlLoRAModels.length > 0 && (
-          <ModelListWrapper
-            title={t('modelManager.controlLora')}
-            modelList={filteredControlLoRAModels}
-            key="control-lora"
-          />
-        )}
-        {/* Clip Embed List */}
-        {isLoadingClipEmbedModels && <FetchingModelsLoader loadingMessage="Loading Clip Embed Models..." />}
-        {!isLoadingClipEmbedModels && filteredClipEmbedModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.clipEmbed')} modelList={filteredClipEmbedModels} key="clip-embed" />
-        )}
-
-        {/* LLaVA OneVision List */}
-        {isLoadingLlavaOneVisionModels && <FetchingModelsLoader loadingMessage="Loading LLaVA OneVision Models..." />}
-        {!isLoadingLlavaOneVisionModels && filteredLlavaOneVisionModels.length > 0 && (
-          <ModelListWrapper
-            title={t('modelManager.llavaOnevision')}
-            modelList={filteredLlavaOneVisionModels}
-            key="llava-onevision"
-          />
-        )}
-
-        {/* Spandrel Image to Image List */}
-        {isLoadingSpandrelImageToImageModels && (
-          <FetchingModelsLoader loadingMessage="Loading Image-to-Image Models..." />
-        )}
-        {!isLoadingSpandrelImageToImageModels && filteredSpandrelImageToImageModels.length > 0 && (
-          <ModelListWrapper
-            title={t('modelManager.spandrelImageToImage')}
-            modelList={filteredSpandrelImageToImageModels}
-            key="spandrel-image-to-image"
-          />
-        )}
-        {/* SigLIP List */}
-        {isLoadingSigLipModels && <FetchingModelsLoader loadingMessage="Loading SigLIP Models..." />}
-        {!isLoadingSigLipModels && filteredSigLipModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.sigLip')} modelList={filteredSigLipModels} key="sig-lip" />
-        )}
-        {/* Flux Redux List */}
-        {isLoadingFluxReduxModels && <FetchingModelsLoader loadingMessage="Loading Flux Redux Models..." />}
-        {!isLoadingFluxReduxModels && filteredFluxReduxModels.length > 0 && (
-          <ModelListWrapper title={t('modelManager.fluxRedux')} modelList={filteredFluxReduxModels} key="flux-redux" />
-        )}
-        {totalFilteredModels === 0 && (
+        {isLoading && <FetchingModelsLoader loadingMessage="Loading..." />}
+        {models.byCategory.map(({ i18nKey, configs }) => (
+          <ModelListWrapper key={i18nKey} title={t(i18nKey)} modelList={configs} />
+        ))}
+        {!isLoading && models.total === 0 && (
           <Flex w="full" h="full" alignItems="center" justifyContent="center">
             <Text>{t('modelManager.noMatchingModels')}</Text>
           </Flex>
@@ -293,7 +71,13 @@ const modelsFilter = <T extends AnyModelConfig>(
   filteredModelType: FilterableModelType | null
 ): T[] => {
   return data.filter((model) => {
-    const matchesFilter = model.name.toLowerCase().includes(nameFilter.toLowerCase());
+    const matchesFilter =
+      model.name.toLowerCase().includes(nameFilter.toLowerCase()) ||
+      model.base.toLowerCase().includes(nameFilter.toLowerCase()) ||
+      model.type.toLowerCase().includes(nameFilter.toLowerCase()) ||
+      model.description?.toLowerCase().includes(nameFilter.toLowerCase()) ||
+      model.format.toLowerCase().includes(nameFilter.toLowerCase());

     const matchesType = getMatchesType(model, filteredModelType);

     return matchesFilter && matchesType;
@@ -25,6 +25,9 @@ const contentSx = {

 export const ModelListWrapper = memo((props: ModelListWrapperProps) => {
   const { title, modelList } = props;
+  if (modelList.length === 0) {
+    return null;
+  }
   return (
     <StickyScrollable title={title} contentSx={contentSx} headingSx={headingSx}>
       {modelList.map((model) => (
@@ -1,46 +1,17 @@
 import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import type { FilterableModelType } from 'features/modelManagerV2/store/modelManagerV2Slice';
+import type { ModelCategoryData } from 'features/modelManagerV2/models';
+import { MODEL_CATEGORIES, MODEL_CATEGORIES_AS_LIST } from 'features/modelManagerV2/models';
 import { selectFilteredModelType, setFilteredModelType } from 'features/modelManagerV2/store/modelManagerV2Slice';
-import { memo, useCallback, useMemo } from 'react';
+import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiFunnelBold } from 'react-icons/pi';
-import { objectKeys } from 'tsafe';

 export const ModelTypeFilter = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
-  const MODEL_TYPE_LABELS: Record<FilterableModelType, string> = useMemo(
-    () => ({
-      main: t('modelManager.main'),
-      refiner: t('sdxl.refiner'),
-      lora: 'LoRA',
-      embedding: t('modelManager.textualInversions'),
-      controlnet: 'ControlNet',
-      vae: 'VAE',
-      t2i_adapter: t('common.t2iAdapter'),
-      t5_encoder: t('modelManager.t5Encoder'),
-      clip_embed: t('modelManager.clipEmbed'),
-      ip_adapter: t('common.ipAdapter'),
-      clip_vision: 'CLIP Vision',
-      spandrel_image_to_image: t('modelManager.spandrelImageToImage'),
-      control_lora: t('modelManager.controlLora'),
-      siglip: t('modelManager.sigLip'),
-      flux_redux: t('modelManager.fluxRedux'),
-      llava_onevision: t('modelManager.llavaOnevision'),
-      video: t('modelManager.video'),
-    }),
-    [t]
-  );
   const filteredModelType = useAppSelector(selectFilteredModelType);

-  const selectModelType = useCallback(
-    (option: FilterableModelType) => {
-      dispatch(setFilteredModelType(option));
-    },
-    [dispatch]
-  );
-
   const clearModelType = useCallback(() => {
     dispatch(setFilteredModelType(null));
   }, [dispatch]);
@@ -48,18 +19,12 @@ export const ModelTypeFilter = memo(() => {
   return (
     <Menu>
       <MenuButton as={Button} size="sm" leftIcon={<PiFunnelBold />}>
-        {filteredModelType ? MODEL_TYPE_LABELS[filteredModelType] : t('modelManager.allModels')}
+        {filteredModelType ? t(MODEL_CATEGORIES[filteredModelType].i18nKey) : t('modelManager.allModels')}
       </MenuButton>
       <MenuList>
         <MenuItem onClick={clearModelType}>{t('modelManager.allModels')}</MenuItem>
-        {objectKeys(MODEL_TYPE_LABELS).map((option) => (
-          <MenuItem
-            key={option}
-            bg={filteredModelType === option ? 'base.700' : 'transparent'}
-            onClick={selectModelType.bind(null, option)}
-          >
-            {MODEL_TYPE_LABELS[option]}
-          </MenuItem>
+        {MODEL_CATEGORIES_AS_LIST.map((data) => (
+          <ModelMenuItem key={data.category} data={data} />
         ))}
       </MenuList>
     </Menu>
@@ -67,3 +32,18 @@ export const ModelTypeFilter = memo(() => {
 });

 ModelTypeFilter.displayName = 'ModelTypeFilter';
+
+const ModelMenuItem = memo(({ data }: { data: ModelCategoryData }) => {
+  const { t } = useTranslation();
+  const dispatch = useAppDispatch();
+  const filteredModelType = useAppSelector(selectFilteredModelType);
+  const onClick = useCallback(() => {
+    dispatch(setFilteredModelType(data.category));
+  }, [data.category, dispatch]);
+  return (
+    <MenuItem bg={filteredModelType === data.category ? 'base.700' : 'transparent'} onClick={onClick}>
+      {t(data.i18nKey)}
+    </MenuItem>
+  );
+});
+
+ModelMenuItem.displayName = 'ModelMenuItem';
@@ -1,20 +1,17 @@
 import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
 import { Combobox } from '@invoke-ai/ui-library';
 import { typedMemo } from 'common/util/typedMemo';
-import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
+import { MODEL_BASE_TO_LONG_NAME } from 'features/modelManagerV2/models';
 import { useCallback, useMemo } from 'react';
 import type { Control } from 'react-hook-form';
 import { useController } from 'react-hook-form';
 import type { UpdateModelArg } from 'services/api/endpoints/models';
+import { objectEntries } from 'tsafe';

-const options: ComboboxOption[] = [
-  { value: 'sd-1', label: MODEL_TYPE_MAP['sd-1'] },
-  { value: 'sd-2', label: MODEL_TYPE_MAP['sd-2'] },
-  { value: 'sd-3', label: MODEL_TYPE_MAP['sd-3'] },
-  { value: 'flux', label: MODEL_TYPE_MAP['flux'] },
-  { value: 'sdxl', label: MODEL_TYPE_MAP['sdxl'] },
-  { value: 'sdxl-refiner', label: MODEL_TYPE_MAP['sdxl-refiner'] },
-];
+const options: ComboboxOption[] = objectEntries(MODEL_BASE_TO_LONG_NAME).map(([value, label]) => ({
+  label,
+  value,
+}));

 type Props = {
   control: Control<UpdateModelArg['body']>;
@@ -0,0 +1,32 @@
+import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
+import { Combobox } from '@invoke-ai/ui-library';
+import { typedMemo } from 'common/util/typedMemo';
+import { MODEL_FORMAT_TO_LONG_NAME } from 'features/modelManagerV2/models';
+import { useCallback, useMemo } from 'react';
+import type { Control } from 'react-hook-form';
+import { useController } from 'react-hook-form';
+import type { UpdateModelArg } from 'services/api/endpoints/models';
+import { objectEntries } from 'tsafe';
+
+const options: ComboboxOption[] = objectEntries(MODEL_FORMAT_TO_LONG_NAME).map(([value, label]) => ({
+  label,
+  value,
+}));
+
+type Props = {
+  control: Control<UpdateModelArg['body']>;
+};
+
+const ModelFormatSelect = ({ control }: Props) => {
+  const { field } = useController({ control, name: 'format' });
+  const value = useMemo(() => options.find((o) => o.value === field.value), [field.value]);
+  const onChange = useCallback<ComboboxOnChange>(
+    (v) => {
+      field.onChange(v?.value);
+    },
+    [field]
+  );
+  return <Combobox value={value} options={options} onChange={onChange} />;
+};
+
+export default typedMemo(ModelFormatSelect);
@@ -0,0 +1,32 @@
+import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
+import { Combobox } from '@invoke-ai/ui-library';
+import { typedMemo } from 'common/util/typedMemo';
+import { MODEL_TYPE_TO_LONG_NAME } from 'features/modelManagerV2/models';
+import { useCallback, useMemo } from 'react';
+import type { Control } from 'react-hook-form';
+import { useController } from 'react-hook-form';
+import type { UpdateModelArg } from 'services/api/endpoints/models';
+import { objectEntries } from 'tsafe';
+
+const options: ComboboxOption[] = objectEntries(MODEL_TYPE_TO_LONG_NAME).map(([value, label]) => ({
+  label,
+  value,
+}));
+
+type Props = {
+  control: Control<UpdateModelArg['body']>;
+};
+
+const ModelTypeSelect = ({ control }: Props) => {
+  const { field } = useController({ control, name: 'type' });
+  const value = useMemo(() => options.find((o) => o.value === field.value), [field.value]);
+  const onChange = useCallback<ComboboxOnChange>(
+    (v) => {
+      field.onChange(v?.value);
+    },
+    [field]
+  );
+  return <Combobox value={value} options={options} onChange={onChange} />;
+};
+
+export default typedMemo(ModelTypeSelect);
@@ -1,16 +1,14 @@
 import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
 import { Combobox } from '@invoke-ai/ui-library';
 import { typedMemo } from 'common/util/typedMemo';
+import { MODEL_VARIANT_TO_LONG_NAME } from 'features/modelManagerV2/models';
 import { useCallback, useMemo } from 'react';
 import type { Control } from 'react-hook-form';
 import { useController } from 'react-hook-form';
 import type { UpdateModelArg } from 'services/api/endpoints/models';
+import { objectEntries } from 'tsafe';

-const options: ComboboxOption[] = [
-  { value: 'normal', label: 'Normal' },
-  { value: 'inpaint', label: 'Inpaint' },
-  { value: 'depth', label: 'Depth' },
-];
+const options: ComboboxOption[] = objectEntries(MODEL_VARIANT_TO_LONG_NAME).map(([value, label]) => ({ label, value }));

 type Props = {
   control: Control<UpdateModelArg['body']>;
@@ -22,6 +22,8 @@ import { type UpdateModelArg, useUpdateModelMutation } from 'services/api/endpoints/models';
 import type { AnyModelConfig } from 'services/api/types';

 import BaseModelSelect from './Fields/BaseModelSelect';
+import ModelFormatSelect from './Fields/ModelFormatSelect';
+import ModelTypeSelect from './Fields/ModelTypeSelect';
 import ModelVariantSelect from './Fields/ModelVariantSelect';
 import PredictionTypeSelect from './Fields/PredictionTypeSelect';
 import { ModelFooter } from './ModelFooter';
@@ -127,6 +129,14 @@ export const ModelEdit = memo(({ modelConfig }: Props) => {
           </Heading>
         )}
         <SimpleGrid columns={2} gap={4}>
+          <FormControl flexDir="column" alignItems="flex-start" gap={1}>
+            <FormLabel>{t('modelManager.modelType')}</FormLabel>
+            <ModelTypeSelect control={form.control} />
+          </FormControl>
+          <FormControl flexDir="column" alignItems="flex-start" gap={1}>
+            <FormLabel>{t('modelManager.modelFormat')}</FormLabel>
+            <ModelFormatSelect control={form.control} />
+          </FormControl>
           {modelConfig.type !== 'clip_vision' && (
             <FormControl flexDir="column" alignItems="flex-start" gap={1}>
               <FormLabel>{t('modelManager.baseModel')}</FormLabel>
@@ -53,7 +53,7 @@ export const ModelView = memo(({ modelConfig }: Props) => {
      <SimpleGrid columns={2} gap={4}>
        <ModelAttrView label={t('modelManager.baseModel')} value={modelConfig.base} />
        <ModelAttrView label={t('modelManager.modelType')} value={modelConfig.type} />
        <ModelAttrView label={t('common.format')} value={modelConfig.format} />
        <ModelAttrView label={t('modelManager.modelFormat')} value={modelConfig.format} />
        <ModelAttrView label={t('modelManager.path')} value={modelConfig.path} />
        <ModelAttrView label={t('modelManager.fileSize')} value={filesize(modelConfig.file_size)} />
        {modelConfig.type === 'main' && (

@@ -1,5 +1,4 @@
import type {
  BaseModelType,
  BoardField,
  Classification,
  ColorField,
@@ -10,11 +9,12 @@ import type {
  ModelIdentifierField,
  ProgressImage,
  SchedulerField,
  SubModelType,
  T2IAdapterField,
  zBaseModelType,
  zClipVariantType,
  zModelFormat,
  zModelVariantType,
  zSubModelType,
} from 'features/nodes/types/common';
import type { Invocation, S } from 'services/api/types';
import type { Equals, Extends } from 'tsafe';
@@ -24,7 +24,8 @@ import type z from 'zod';

/**
 * These types originate from the server and are recreated as zod schemas manually, for use at runtime.
 * The tests ensure that the types are correctly recreated.
 * The tests ensure that the types are correctly recreated. If one of these tests fails, it means the zod
 * schema and the type have diverged and need to be reconciled - update the zod schema.
 */

describe('Common types', () => {
@@ -40,8 +41,8 @@ describe('Common types', () => {

  // Model component types
  test('ModelIdentifier', () => assert<Equals<ModelIdentifierField, S['ModelIdentifierField']>>());
  test('ModelIdentifier', () => assert<Equals<BaseModelType, S['BaseModelType']>>());
  test('ModelIdentifier', () => assert<Equals<SubModelType, S['SubModelType']>>());
  test('ModelIdentifier', () => assert<Equals<z.infer<typeof zBaseModelType>, S['BaseModelType']>>());
  test('ModelIdentifier', () => assert<Equals<z.infer<typeof zSubModelType>, S['SubModelType']>>());
  test('ClipVariantType', () => assert<Equals<z.infer<typeof zClipVariantType>, S['ClipVariantType']>>());
  test('ModelVariantType', () => assert<Equals<z.infer<typeof zModelVariantType>, S['ModelVariantType']>>());
  test('ModelFormat', () => assert<Equals<z.infer<typeof zModelFormat>, S['ModelFormat']>>());

@@ -89,6 +89,7 @@ export const zBaseModelType = z.enum([
  'gemini-2.5',
  'veo3',
  'runway',
  'unknown',
]);
export type BaseModelType = z.infer<typeof zBaseModelType>;
export const zMainModelBase = z.enum([
@@ -126,8 +127,10 @@ export const zModelType = z.enum([
  'siglip',
  'flux_redux',
  'video',
  'unknown',
]);
const zSubModelType = z.enum([
export type ModelType = z.infer<typeof zModelType>;
export const zSubModelType = z.enum([
  'unet',
  'transformer',
  'text_encoder',
@@ -142,10 +145,10 @@ const zSubModelType = z.enum([
  'scheduler',
  'safety_checker',
]);
export type SubModelType = z.infer<typeof zSubModelType>;

export const zClipVariantType = z.enum(['large', 'gigantic']);
export const zModelVariantType = z.enum(['normal', 'inpaint', 'depth']);
export type ModelVariantType = z.infer<typeof zModelVariantType>;
export const zModelFormat = z.enum([
  'omi',
  'diffusers',
@@ -161,7 +164,9 @@ export const zModelFormat = z.enum([
  'bnb_quantized_nf4b',
  'gguf_quantized',
  'api',
  'unknown',
]);
export type ModelFormat = z.infer<typeof zModelFormat>;

export const zModelIdentifierField = z.object({
  key: z.string().min(1),
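
// Illustrative sketch (not from the diff): the 'unknown' members added above let runtime
// validation accept values for unidentified models instead of throwing. The trimmed enum and
// fallback behavior below are assumptions for illustration only.
import { z } from 'zod';

const zFormatSketch = z.enum(['diffusers', 'checkpoint', 'api', 'unknown']);

const coerceFormat = (value: unknown): z.infer<typeof zFormatSketch> => {
  const parsed = zFormatSketch.safeParse(value);
  // Treat anything unrecognized as 'unknown' rather than failing the whole config parse.
  return parsed.success ? parsed.data : 'unknown';
};

coerceFormat('api'); // 'api'
coerceFormat('mystery-format'); // 'unknown'
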

@@ -2,10 +2,11 @@ import type { RootState } from 'app/store/store';
import { generateSeeds } from 'common/util/generateSeeds';
import { range } from 'es-toolkit/compat';
import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { API_BASE_MODELS, VIDEO_BASE_MODELS } from 'features/modelManagerV2/models';
import type { BaseModelType } from 'features/nodes/types/common';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { API_BASE_MODELS, VIDEO_BASE_MODELS } from 'features/parameters/types/constants';
import type { components } from 'services/api/schema';
import type { BaseModelType, Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';

const getExtendedPrompts = (arg: {
  seedBehaviour: SeedBehaviour;

@@ -7,7 +7,7 @@ import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useCLIPEmbedModels } from 'services/api/hooks/modelsByType';
import type { CLIPGEmbedModelConfig } from 'services/api/types';
import { isCLIPGEmbedModelConfig } from 'services/api/types';
import { isCLIPGEmbedModelConfigOrSubmodel } from 'services/api/types';

const ParamCLIPEmbedModelSelect = () => {
  const dispatch = useAppDispatch();
@@ -25,7 +25,7 @@ const ParamCLIPEmbedModelSelect = () => {
  );

  const { options, value, onChange, noOptionsMessage } = useModelCombobox({
    modelConfigs: modelConfigs.filter((config) => isCLIPGEmbedModelConfig(config)),
    modelConfigs: modelConfigs.filter((config) => isCLIPGEmbedModelConfigOrSubmodel(config)),
    onChange: _onChange,
    selectedModel: clipEmbedModel,
    isLoading,

@@ -7,7 +7,7 @@ import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useCLIPEmbedModels } from 'services/api/hooks/modelsByType';
import type { CLIPLEmbedModelConfig } from 'services/api/types';
import { isCLIPLEmbedModelConfig } from 'services/api/types';
import { isCLIPLEmbedModelConfigOrSubmodel } from 'services/api/types';

const ParamCLIPEmbedModelSelect = () => {
  const dispatch = useAppDispatch();
@@ -25,7 +25,7 @@ const ParamCLIPEmbedModelSelect = () => {
  );

  const { options, value, onChange, noOptionsMessage } = useModelCombobox({
    modelConfigs: modelConfigs.filter((config) => isCLIPLEmbedModelConfig(config)),
    modelConfigs: modelConfigs.filter((config) => isCLIPLEmbedModelConfigOrSubmodel(config)),
    onChange: _onChange,
    selectedModel: clipEmbedModel,
    isLoading,
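
// The `...OrSubmodel` guards swapped in above are not shown in this diff. A hedged sketch of
// the idea: accept standalone CLIP embed configs, and also main-model configs that expose a
// loadable text-encoder submodel. The exact predicate in services/api/types may differ.
import type { AnyModelConfig } from 'services/api/types';

const isCLIPLEmbedModelConfigOrSubmodelSketch = (config: AnyModelConfig): boolean => {
  if (config.type === 'clip_embed' && 'variant' in config && config.variant === 'large') {
    return true; // standalone CLIP-L embed model
  }
  // Main models may bundle submodels; treat a text_encoder entry as an acceptable source.
  return 'submodels' in config && config.submodels != null && 'text_encoder' in config.submodels;
};
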

@@ -24,11 +24,16 @@ import { typedMemo } from 'common/util/typedMemo';
import { uniq } from 'es-toolkit/compat';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import {
  API_BASE_MODELS,
  MODEL_BASE_TO_COLOR,
  MODEL_BASE_TO_LONG_NAME,
  MODEL_BASE_TO_SHORT_NAME,
} from 'features/modelManagerV2/models';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { BASE_COLOR_MAP } from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge';
import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
import type { BaseModelType } from 'features/nodes/types/common';
import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
import { API_BASE_MODELS, MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -37,7 +42,7 @@ import { memo, useCallback, useMemo, useRef } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiLinkSimple } from 'react-icons/pi';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';
import type { AnyModelConfig, BaseModelType } from 'services/api/types';
import type { AnyModelConfig } from 'services/api/types';

const selectSelectedModelKeys = createMemoizedSelector(selectParamsSlice, selectLoRAsSlice, (params, loras) => {
  const keys: string[] = [];
@@ -123,21 +128,21 @@ const getGroupNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'External API';
  }
  return MODEL_TYPE_MAP[modelConfig.base];
  return MODEL_BASE_TO_LONG_NAME[modelConfig.base];
};

const getGroupShortNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'api';
  }
  return MODEL_TYPE_SHORT_MAP[modelConfig.base];
  return MODEL_BASE_TO_SHORT_NAME[modelConfig.base];
};

const getGroupColorSchemeFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'pink';
  }
  return BASE_COLOR_MAP[modelConfig.base];
  return MODEL_BASE_TO_COLOR[modelConfig.base];
};

const relatedModelKeysQueryOptions = {

@@ -21,9 +21,9 @@ import {
  zVideoDuration,
  zVideoResolution,
} from 'features/controlLayers/store/types';
import { REQUIRES_STARTING_FRAME_BASE_MODELS } from 'features/modelManagerV2/models';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { REQUIRES_STARTING_FRAME_BASE_MODELS } from 'features/parameters/types/constants';
import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
import { isVideoModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

@@ -1,47 +1,5 @@
import type { ComboboxOption } from '@invoke-ai/ui-library';
import type { BaseModelType } from 'services/api/types';

/**
 * Mapping of base model to human readable name
 */
export const MODEL_TYPE_MAP: Record<BaseModelType, string> = {
  any: 'Any',
  'sd-1': 'Stable Diffusion 1.x',
  'sd-2': 'Stable Diffusion 2.x',
  'sd-3': 'Stable Diffusion 3.x',
  sdxl: 'Stable Diffusion XL',
  'sdxl-refiner': 'Stable Diffusion XL Refiner',
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
  'gemini-2.5': 'Gemini 2.5',
  veo3: 'Veo3',
  runway: 'Runway',
};

/**
 * Mapping of base model to (short) human readable name
 */
export const MODEL_TYPE_SHORT_MAP: Record<BaseModelType, string> = {
  any: 'Any',
  'sd-1': 'SD1.X',
  'sd-2': 'SD2.X',
  'sd-3': 'SD3.X',
  sdxl: 'SDXL',
  'sdxl-refiner': 'SDXLR',
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
  'gemini-2.5': 'Gemini 2.5',
  veo3: 'Veo3',
  runway: 'Runway',
};
import type { BaseModelType } from 'features/nodes/types/common';

/**
 * Mapping of base model to CLIP skip parameter constraints
@@ -136,57 +94,3 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [
  { value: 'unipc', label: 'UniPC' },
  { value: 'unipc_k', label: 'UniPC Karras' },
];

/**
 * List of base models that make API requests
 */
export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];

export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4'];

export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3'];

export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sdxl',
  'flux',
  'flux-kontext',
  'chatgpt-4o',
  'gemini-2.5',
];

export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sdxl',
  'cogview4',
  'sd-3',
  'imagen3',
  'imagen4',
];

export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sd-3',
  'sdxl',
  'flux',
  'cogview4',
];

export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sd-3',
  'sdxl',
  'flux',
  'cogview4',
  'imagen3',
  'imagen4',
  'flux-kontext',
  'chatgpt-4o',
];

export const VIDEO_BASE_MODELS = ['veo3', 'runway'];

export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway'];
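
// The new 'features/modelManagerV2/models' module these constants move into is not shown in this
// diff. A sketch of its shape, inferred from the import sites above: the list constants carry over
// verbatim from the removed block, while the removed MODEL_TYPE_MAP / MODEL_TYPE_SHORT_MAP /
// BASE_COLOR_MAP are assumed to reappear as MODEL_BASE_TO_LONG_NAME / MODEL_BASE_TO_SHORT_NAME /
// MODEL_BASE_TO_COLOR, extended with an 'unknown' entry to satisfy the widened BaseModelType.
import type { BaseModelType } from 'features/nodes/types/common';

export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];
export const VIDEO_BASE_MODELS: BaseModelType[] = ['veo3', 'runway'];
export const REQUIRES_STARTING_FRAME_BASE_MODELS: BaseModelType[] = ['runway'];

// Entries below mirror the removed MODEL_TYPE_SHORT_MAP; the real module must cover every
// BaseModelType member, including the new 'unknown'.
export const MODEL_BASE_TO_SHORT_NAME: Record<BaseModelType, string> = {
  any: 'Any',
  'sd-1': 'SD1.X',
  'sd-2': 'SD2.X',
  'sd-3': 'SD3.X',
  sdxl: 'SDXL',
  'sdxl-refiner': 'SDXLR',
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
  'gemini-2.5': 'Gemini 2.5',
  veo3: 'Veo3',
  runway: 'Runway',
  unknown: 'Unknown',
};
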

@@ -1,4 +1,4 @@
import type { BaseModelType } from 'services/api/types';
import type { BaseModelType } from 'features/nodes/types/common';

/**
 * Gets the optimal dimension for a given base model:

@@ -24,6 +24,7 @@ import {
import type { DynamicPromptsState } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/modelManagerV2/models';
import { $isInPublishFlow } from 'features/nodes/components/sidePanel/workflow/publish';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
@@ -37,7 +38,6 @@ import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
import { selectVideoSlice, type VideoState } from 'features/parameters/store/videoSlice';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { getGridSize } from 'features/parameters/util/optimalDimension';
import { promptExpansionApi, type PromptExpansionRequestState } from 'features/prompt/PromptExpansion/state';

@@ -12,12 +12,12 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { LoRAList } from 'features/lora/components/LoRAList';
import LoRASelect from 'features/lora/components/LoRASelect';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import ParamCFGScale from 'features/parameters/components/Core/ParamCFGScale';
import ParamGuidance from 'features/parameters/components/Core/ParamGuidance';
import ParamScheduler from 'features/parameters/components/Core/ParamScheduler';
import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import { DisabledModelWarning } from 'features/parameters/components/MainModel/DisabledModelWarning';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { MainModelPicker } from 'features/settingsAccordions/components/GenerationSettingsAccordion/MainModelPicker';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';

@@ -7,12 +7,12 @@ import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectIsApiBaseModel, selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import { LoRAList } from 'features/lora/components/LoRAList';
import LoRASelect from 'features/lora/components/LoRASelect';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import ParamGuidance from 'features/parameters/components/Core/ParamGuidance';
import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import { DisabledModelWarning } from 'features/parameters/components/MainModel/DisabledModelWarning';
import ParamUpscaleCFGScale from 'features/parameters/components/Upscale/ParamUpscaleCFGScale';
import ParamUpscaleScheduler from 'features/parameters/components/Upscale/ParamUpscaleScheduler';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { MainModelPicker } from 'features/settingsAccordions/components/GenerationSettingsAccordion/MainModelPicker';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';

@@ -1,3 +1,4 @@
export const githubLink = 'http://github.com/invoke-ai/InvokeAI';
export const githubIssuesLink = 'https://github.com/invoke-ai/InvokeAI/issues';
export const discordLink = 'https://discord.gg/ZmtBAhwWhy';
export const websiteLink = 'https://www.invoke.com/';

@@ -1,4 +1,4 @@
import { createSelector, type Selector } from '@reduxjs/toolkit';
import type { Selector } from '@reduxjs/toolkit';
import { EMPTY_ARRAY } from 'app/store/constants';
import type { RootState } from 'app/store/store';
import { useMemo } from 'react';
@@ -10,10 +10,8 @@ import {
import type { AnyModelConfig } from 'services/api/types';
import {
  isChatGPT4oModelConfig,
  isCLIPEmbedModelConfig,
  isCLIPVisionModelConfig,
  isCLIPEmbedModelConfigOrSubmodel,
  isControlLayerModelConfig,
  isControlLoRAModelConfig,
  isControlNetModelConfig,
  isFluxKontextApiModelConfig,
  isFluxKontextModelConfig,
@@ -21,26 +19,18 @@ import {
  isFluxVAEModelConfig,
  isGemini2_5ModelConfig,
  isIPAdapterModelConfig,
  isLLaVAModelConfig,
  isLoRAModelConfig,
  isNonRefinerMainModelConfig,
  isRefinerMainModelModelConfig,
  isSigLipModelConfig,
  isSpandrelImageToImageModelConfig,
  isT2IAdapterModelConfig,
  isT5EncoderModelConfig,
  isT5EncoderModelConfigOrSubmodel,
  isTIModelConfig,
  isVAEModelConfig,
  isVAEModelConfigOrSubmodel,
  isVideoModelConfig,
} from 'services/api/types';

type ModelHookArgs = { excludeSubmodels?: boolean };

const buildModelsHook =
  <T extends AnyModelConfig>(
    typeGuard: (config: AnyModelConfig, excludeSubmodels?: boolean) => config is T,
    excludeSubmodels?: boolean
  ) =>
  <T extends AnyModelConfig>(typeGuard: (config: AnyModelConfig) => config is T) =>
  (filter: (config: T) => boolean = () => true) => {
    const result = useGetModelConfigsQuery(undefined);
    const modelConfigs = useMemo(() => {
@@ -50,7 +40,7 @@ const buildModelsHook =

      return modelConfigsAdapterSelectors
        .selectAll(result.data)
        .filter((config) => typeGuard(config, excludeSubmodels))
        .filter((config) => typeGuard(config))
        .filter(filter);
    }, [filter, result.data]);

@@ -59,23 +49,14 @@ const buildModelsHook =
export const useMainModels = buildModelsHook(isNonRefinerMainModelConfig);
export const useRefinerModels = buildModelsHook(isRefinerMainModelModelConfig);
export const useLoRAModels = buildModelsHook(isLoRAModelConfig);
export const useControlLoRAModel = buildModelsHook(isControlLoRAModelConfig);
export const useControlLayerModels = buildModelsHook(isControlLayerModelConfig);
export const useControlNetModels = buildModelsHook(isControlNetModelConfig);
export const useT2IAdapterModels = buildModelsHook(isT2IAdapterModelConfig);
export const useT5EncoderModels = (args?: ModelHookArgs) =>
  buildModelsHook(isT5EncoderModelConfig, args?.excludeSubmodels)();
export const useCLIPEmbedModels = (args?: ModelHookArgs) =>
  buildModelsHook(isCLIPEmbedModelConfig, args?.excludeSubmodels)();
export const useT5EncoderModels = () => buildModelsHook(isT5EncoderModelConfigOrSubmodel)();
export const useCLIPEmbedModels = () => buildModelsHook(isCLIPEmbedModelConfigOrSubmodel)();
export const useSpandrelImageToImageModels = buildModelsHook(isSpandrelImageToImageModelConfig);
export const useIPAdapterModels = buildModelsHook(isIPAdapterModelConfig);
export const useEmbeddingModels = buildModelsHook(isTIModelConfig);
export const useVAEModels = (args?: ModelHookArgs) => buildModelsHook(isVAEModelConfig, args?.excludeSubmodels)();
export const useFluxVAEModels = (args?: ModelHookArgs) =>
  buildModelsHook(isFluxVAEModelConfig, args?.excludeSubmodels)();
export const useCLIPVisionModels = buildModelsHook(isCLIPVisionModelConfig);
export const useSigLipModels = buildModelsHook(isSigLipModelConfig);
export const useFluxReduxModels = buildModelsHook(isFluxReduxModelConfig);
export const useVAEModels = () => buildModelsHook(isVAEModelConfigOrSubmodel)();
export const useFluxVAEModels = () => buildModelsHook(isFluxVAEModelConfig)();
export const useGlobalReferenceImageModels = buildModelsHook(
  (config) =>
    isIPAdapterModelConfig(config) ||
@@ -88,7 +69,6 @@ export const useGlobalReferenceImageModels = buildModelsHook(
export const useRegionalReferenceImageModels = buildModelsHook(
  (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
);
export const useLLaVAModels = buildModelsHook(isLLaVAModelConfig);
export const useVideoModels = buildModelsHook(isVideoModelConfig);

const buildModelsSelector =
@@ -100,23 +80,7 @@ const buildModelsSelector =
    }
    return modelConfigsAdapterSelectors.selectAll(result.data).filter(typeGuard);
  };
// export const selectSDMainModels = buildModelsSelector(isNonRefinerNonFluxMainModelConfig);
// export const selectMainModels = buildModelsSelector(isNonRefinerMainModelConfig);
// export const selectNonSDXLMainModels = buildModelsSelector(isNonSDXLMainModelConfig);
// export const selectRefinerModels = buildModelsSelector(isRefinerMainModelModelConfig);
// export const selectFluxModels = buildModelsSelector(isFluxMainModelModelConfig);
// export const selectSDXLModels = buildModelsSelector(isSDXLMainModelModelConfig);
// export const selectLoRAModels = buildModelsSelector(isLoRAModelConfig);
// export const selectControlNetAndT2IAdapterModels = buildModelsSelector(isControlNetOrT2IAdapterModelConfig);
// export const selectControlNetModels = buildModelsSelector(isControlNetModelConfig);
// export const selectT2IAdapterModels = buildModelsSelector(isT2IAdapterModelConfig);
// export const selectT5EncoderModels = buildModelsSelector(isT5EncoderModelConfig);
// export const selectClipEmbedModels = buildModelsSelector(isClipEmbedModelConfig);
// export const selectSpandrelImageToImageModels = buildModelsSelector(isSpandrelImageToImageModelConfig);
export const selectIPAdapterModels = buildModelsSelector(isIPAdapterModelConfig);
// export const selectEmbeddingModels = buildModelsSelector(isTIModelConfig);
// export const selectVAEModels = buildModelsSelector(isVAEModelConfig);
// export const selectFluxVAEModels = buildModelsSelector(isFluxVAEModelConfig);
export const selectGlobalRefImageModels = buildModelsSelector(
  (config) =>
    isIPAdapterModelConfig(config) ||
@@ -129,19 +93,3 @@ export const selectGlobalRefImageModels = buildModelsSelector(
export const selectRegionalRefImageModels = buildModelsSelector(
  (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
);

export const buildSelectModelConfig = <T extends AnyModelConfig>(
  key: string,
  typeGuard: (config: AnyModelConfig) => config is T
): Selector<RootState, T | null> =>
  createSelector(selectModelConfigsQuery, (result) => {
    if (!result.data) {
      return null;
    }
    return (
      modelConfigsAdapterSelectors
        .selectAll(result.data)
        .filter(typeGuard)
        .find((m) => m.key === key) ?? null
    );
  });
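
// Usage sketch (the store-hook import path is assumed; it is not shown in this diff): build a
// per-key selector once, memoized so the createSelector instance is stable for a given key, and
// subscribe to it — here with one of the new submodel-aware guards.
import { useMemo } from 'react';
import { useAppSelector } from 'app/store/storeHooks';
import { buildSelectModelConfig } from 'services/api/hooks/modelsByType';
import { isVAEModelConfigOrSubmodel } from 'services/api/types';

const useVAEModelConfig = (key: string) => {
  const selector = useMemo(() => buildSelectModelConfig(key, isVAEModelConfigOrSubmodel), [key]);
  return useAppSelector(selector); // matching config, or null if absent / wrong type
};
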

@@ -2467,7 +2467,7 @@ export type components = {
     * @description Base model type.
     * @enum {string}
     */
    BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "gemini-2.5" | "chatgpt-4o" | "flux-kontext" | "veo3" | "runway";
    BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "gemini-2.5" | "chatgpt-4o" | "flux-kontext" | "veo3" | "runway" | "unknown";
    /** Batch */
    Batch: {
      /**
@@ -12673,6 +12673,7 @@ export type components = {
     * remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
     * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
     * unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
     * allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
     */
    InvokeAIAppConfig: {
      /**
@@ -13028,6 +13029,12 @@ export type components = {
       * @default false
       */
      unsafe_disable_picklescan?: boolean;
      /**
       * Allow Unknown Models
       * @description Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
       * @default true
       */
      allow_unknown_models?: boolean;
    };
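
// Client-side sketch (illustrative; the query that fetches this config is not shown in this
// diff): the new flag surfaces through the generated types, so UI code can gate unknown-model
// affordances on it while honoring the documented default of true.
import type { components } from 'services/api/schema';

type AppConfig = components['schemas']['InvokeAIAppConfig'];

export const allowsUnknownModels = (config: AppConfig): boolean => config.allow_unknown_models ?? true;
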
    /**
     * InvokeAIAppConfigWithSetFields
@@ -16907,7 +16914,7 @@ export type components = {
     * @description Storage format of model.
     * @enum {string}
     */
    ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api";
    ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown";
    /** ModelIdentifierField */
    ModelIdentifierField: {
      /**
@@ -17040,6 +17047,11 @@ export type components = {
       * @description Size of the model (may be None for installation of a local path)
       */
      total_bytes: number | null;
      /**
       * Config
       * @description The installed model's config
       */
      config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
    };
    /**
     * ModelInstallDownloadProgressEvent
@@ -17205,7 +17217,7 @@ export type components = {
       * Config Out
       * @description After successful installation, this will hold the configuration object.
       */
      config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"]) | null;
      config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]) | null;
      /**
       * Inplace
       * @description Leave model in its current location; otherwise install under models directory
@@ -17291,7 +17303,7 @@ export type components = {
       * Config
       * @description The model's config
       */
      config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
      config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
      /**
       * @description The submodel type, if any
       * @default null
@@ -17312,7 +17324,7 @@ export type components = {
       * Config
       * @description The model's config
       */
      config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
      config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
      /**
       * @description The submodel type, if any
       * @default null
@@ -17468,7 +17480,7 @@ export type components = {
     * @description Model type.
     * @enum {string}
     */
    ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video";
    ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown";
    /**
     * ModelVariantType
     * @description Variant type.
@@ -17481,7 +17493,7 @@ export type components = {
     */
    ModelsList: {
      /** Models */
      models: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"])[];
      models: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"])[];
    };
    /**
     * Multiply Integers
@@ -21928,6 +21940,86 @@ export type components = {
       */
      token: string;
    };
    /** UnknownModelConfig */
    UnknownModelConfig: {
      /**
       * Key
       * @description A unique key for this model.
       */
      key: string;
      /**
       * Hash
       * @description The hash of the model file(s).
       */
      hash: string;
      /**
       * Path
       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
       */
      path: string;
      /**
       * File Size
       * @description The size of the model in bytes.
       */
      file_size: number;
      /**
       * Name
       * @description Name of the model.
       */
      name: string;
      /**
       * Type
       * @default unknown
       * @constant
       */
      type: "unknown";
      /**
       * Format
       * @default unknown
       * @constant
       */
      format: "unknown";
      /**
       * Base
       * @default unknown
       * @constant
       */
      base: "unknown";
      /**
       * Source
       * @description The original source of the model (path, URL or repo_id).
       */
      source: string;
      /** @description The type of source */
      source_type: components["schemas"]["ModelSourceType"];
      /**
       * Description
       * @description Model description
       */
      description?: string | null;
      /**
       * Source Api Response
       * @description The original API response from the source, as stringified JSON.
       */
      source_api_response?: string | null;
      /**
       * Cover Image
       * @description Url for image to preview model
       */
      cover_image?: string | null;
      /**
       * Submodels
       * @description Loadable submodels in this model
       */
      submodels?: {
        [key: string]: components["schemas"]["SubmodelDefinition"];
      } | null;
      /**
       * Usage Info
       * @description Usage information for this model
       */
      usage_info?: string | null;
    };
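
// With UnknownModelConfig added to every config union above, client code can narrow to it with a
// simple discriminant check (sketch — an equivalent guard may already exist in services/api/types):
import type { AnyModelConfig, S } from 'services/api/types';

export const isUnknownModelConfig = (config: AnyModelConfig): config is S['UnknownModelConfig'] =>
  config.type === 'unknown';
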
|
||||
/**
|
||||
* Unsharp Mask
|
||||
* @description Applies an unsharp mask filter to an image
|
||||
@@ -22938,7 +23030,7 @@ export interface operations {
|
||||
[name: string]: unknown;
|
||||
};
|
||||
content: {
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
|
||||
};
|
||||
};
|
||||
/** @description Validation Error */
|
||||
@@ -22988,7 +23080,7 @@ export interface operations {
|
||||
* "repo_variant": "fp16",
|
||||
* "upcast_attention": false
|
||||
* } */
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
|
||||
};
|
||||
};
|
||||
/** @description Bad request */
|
||||
@@ -23093,7 +23185,7 @@ export interface operations {
|
||||
* "repo_variant": "fp16",
|
||||
* "upcast_attention": false
|
||||
* } */
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
|
||||
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Bad request */
@@ -23607,7 +23699,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Bad request */
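These hunks extend the generated OpenAPI response unions with `UnknownModelConfig`, matching the new backend config type. As a minimal sketch (assuming the generated `components` types are imported as usual), downstream code can alias the new schema type directly:

type UnknownModelConfig = components['schemas']['UnknownModelConfig'];

This mirrors the `S['UnknownModelConfig']` alias added in the frontend types below.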
@@ -105,16 +105,9 @@ export const isVideoDTO = (dto: ImageDTO | VideoDTO): dto is VideoDTO => {
return 'video_id' in dto;
};

// Models
export type ModelType = S['ModelType'];
export type BaseModelType = S['BaseModelType'];

// Model Configs

export type ControlLoRAModelConfig = S['ControlLoRALyCORISConfig'] | S['ControlLoRADiffusersConfig'];
// TODO(MM2): Can we make key required in the pydantic model?
export type LoRAModelConfig = S['LoRADiffusersConfig'] | S['LoRALyCORISConfig'] | S['LoRAOmiConfig'];
// TODO(MM2): Can we rename this from Vae -> VAE
export type VAEModelConfig = S['VAECheckpointConfig'] | S['VAEDiffusersConfig'];
export type ControlNetModelConfig = S['ControlNetDiffusersConfig'] | S['ControlNetCheckpointConfig'];
export type IPAdapterModelConfig = S['IPAdapterInvokeAIConfig'] | S['IPAdapterCheckpointConfig'];
@@ -134,6 +127,7 @@ type SigLipModelConfig = S['SigLIPConfig'];
export type FLUXReduxModelConfig = S['FluxReduxConfig'];
type ApiModelConfig = S['ApiModelConfig'];
export type VideoApiModelConfig = S['VideoApiModelConfig'];
type UnknownModelConfig = S['UnknownModelConfig'];
export type MainModelConfig = DiffusersModelConfig | CheckpointModelConfig | ApiModelConfig;
export type FLUXKontextModelConfig = MainModelConfig;
export type ChatGPT4oModelConfig = ApiModelConfig;
@@ -155,7 +149,8 @@ export type AnyModelConfig =
| CLIPVisionDiffusersConfig
| SigLipModelConfig
| FLUXReduxModelConfig
| LlavaOnevisionConfig;
| LlavaOnevisionConfig
| UnknownModelConfig;
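With `UnknownModelConfig` added to the `AnyModelConfig` union, any code that discriminates on `config.type` can now encounter the `'unknown'` arm. A minimal sketch of the effect (not part of this diff; the function name is hypothetical):

const describeConfig = (config: AnyModelConfig): string => {
  // 'unknown' is a possible discriminant after this change, so handle it first.
  if (config.type === 'unknown') {
    return `unidentified model at ${config.path}`;
  }
  return `${config.type} model "${config.name}"`;
};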
/**
* Checks if a list of submodels contains any that match a given variant or type
@@ -199,10 +194,17 @@ export const isControlLoRAModelConfig = (config: AnyModelConfig): config is Cont
return config.type === 'control_lora';
};

export const isVAEModelConfig = (config: AnyModelConfig, excludeSubmodels?: boolean): config is VAEModelConfig => {
export const isVAEModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is VAEModelConfig => {
return config.type === 'vae' || (!excludeSubmodels && config.type === 'main' && checkSubmodels(['vae'], config));
};

export const isVAEModelConfig = (config: AnyModelConfig): config is VAEModelConfig => {
return config.type === 'vae';
};
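The rename splits the old dual-purpose guard in two: `isVAEModelConfigOrSubmodel` accepts standalone VAEs as well as main models that expose a `vae` submodel, while the new plain `isVAEModelConfig` accepts standalone VAEs only. A usage sketch (the `configs` array is hypothetical):

declare const configs: AnyModelConfig[];

// Candidates for a VAE picker: standalone VAEs plus main models with a loadable 'vae' submodel.
const vaeChoices = configs.filter((c) => isVAEModelConfigOrSubmodel(c));

// Standalone VAE models only.
const standaloneVaes = configs.filter(isVAEModelConfig);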

export const isNonFluxVAEModelConfig = (
config: AnyModelConfig,
excludeSubmodels?: boolean
@@ -246,7 +248,7 @@ export const isT2IAdapterModelConfig = (config: AnyModelConfig): config is T2IAd
return config.type === 't2i_adapter';
};

export const isT5EncoderModelConfig = (
export const isT5EncoderModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is T5EncoderModelConfig | T5EncoderBnbQuantizedLlmInt8bModelConfig => {
@@ -256,7 +258,13 @@ export const isT5EncoderModelConfig = (
);
};

export const isCLIPEmbedModelConfig = (
export const isT5EncoderModelConfig = (
config: AnyModelConfig
): config is T5EncoderModelConfig | T5EncoderBnbQuantizedLlmInt8bModelConfig => {
return config.type === 't5_encoder';
};

export const isCLIPEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPEmbedModelConfig => {
@@ -266,7 +274,11 @@ export const isCLIPEmbedModelConfig = (
);
};

export const isCLIPLEmbedModelConfig = (
export const isCLIPEmbedModelConfig = (config: AnyModelConfig): config is CLIPEmbedModelConfig => {
return config.type === 'clip_embed';
};

export const isCLIPLEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPLEmbedModelConfig => {
@@ -276,7 +288,7 @@ export const isCLIPLEmbedModelConfig = (
);
};

export const isCLIPGEmbedModelConfig = (
export const isCLIPGEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPGEmbedModelConfig => {
@@ -308,6 +320,10 @@ export const isVideoModelConfig = (config: AnyModelConfig): config is VideoApiMo
return config.type === 'video';
};

export const isUnknownModelConfig = (config: AnyModelConfig): config is UnknownModelConfig => {
return config.type === 'unknown';
};

export const isFluxKontextApiModelConfig = (config: AnyModelConfig): config is ApiModelConfig => {
return config.type === 'main' && config.base === 'flux-kontext';
};
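A short sketch of the new `isUnknownModelConfig` guard in use (illustrative only; the `models` array is hypothetical):

declare const models: AnyModelConfig[];

// Separate unidentified models so the UI can badge or hide them.
const unidentified = models.filter(isUnknownModelConfig);
const identified = models.filter((m) => !isUnknownModelConfig(m));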
@@ -4,6 +4,7 @@ import { logger } from 'app/logging/logger';
import type { AppDispatch, AppGetState } from 'app/store/store';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { discordLink, githubIssuesLink } from 'features/system/store/constants';
import { toast, toastApi } from 'features/toast/toast';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { t } from 'i18next';
@@ -191,3 +192,10 @@ const HFUnauthorizedToastDescription = () => {
</Text>
);
};

export const DiscordLink = () => {
return <ExternalLink fontWeight="semibold" href={discordLink} display="inline-flex" label="Discord" />;
};
export const GitHubIssuesLink = () => {
return <ExternalLink fontWeight="semibold" href={githubIssuesLink} display="inline-flex" label="GitHub" />;
};

@@ -1,4 +1,4 @@
import { ExternalLink } from '@invoke-ai/ui-library';
import { ExternalLink, Flex, Text } from '@invoke-ai/ui-library';
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import { socketConnected } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
@@ -20,13 +20,14 @@ import ErrorToastDescription, { getTitle } from 'features/toast/ErrorToastDescri
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { LRUCache } from 'lru-cache';
import { Trans } from 'react-i18next';
import type { ApiTagDescription } from 'services/api';
import { api, LIST_ALL_TAG, LIST_TAG } from 'services/api';
import { modelsApi } from 'services/api/endpoints/models';
import { queueApi } from 'services/api/endpoints/queue';
import { workflowsApi } from 'services/api/endpoints/workflows';
import { buildOnInvocationComplete } from 'services/events/onInvocationComplete';
import { buildOnModelInstallError } from 'services/events/onModelInstallError';
import { buildOnModelInstallError, DiscordLink, GitHubIssuesLink } from 'services/events/onModelInstallError';
import type { ClientToServerEvents, ServerToClientEvents } from 'services/events/types';
import type { Socket } from 'socket.io-client';
import type { JsonObject } from 'type-fest';
@@ -292,7 +293,41 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
socket.on('model_install_complete', (data) => {
log.debug({ data }, 'Model install complete');

const { id } = data;
const { id, config } = data;

if (
config.type === 'unknown' ||
config.base === 'unknown' ||
/**
* Checking whether type/base are 'unknown' technically narrows the config so that a config reaching the
* third check can no longer have an 'unknown' format according to the types. In the future, if we add more
* model config classes, this may change, so we will continue to check all three. Any one being 'unknown' is
* concerning enough to warrant a toast.
*/
/* @ts-expect-error See note above */
config.format === 'unknown'
) {
toast({
id: 'UNKNOWN_MODEL',
title: t('modelManager.unidentifiedModelTitle'),
description: (
<Flex flexDir="column" gap={2}>
<Text fontSize="md" as="span">
<Trans i18nKey="modelManager.unidentifiedModelMessage" />
</Text>
<Text fontSize="md" as="span">
<Trans
i18nKey="modelManager.unidentifiedModelMessage2"
components={{ DiscordLink: <DiscordLink />, GitHubIssuesLink: <GitHubIssuesLink /> }}
/>
</Text>
</Flex>
),
status: 'error',
isClosable: true,
duration: null,
});
}

const installs = selectModelInstalls(getState()).data;
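For testability, the three checks above could be folded into a single predicate. A hypothetical helper, not part of this diff, that mirrors the listener's logic:

// Mirrors the type/base/format checks in the listener above.
const isUnidentifiedConfig = (config: AnyModelConfig): boolean =>
  config.type === 'unknown' ||
  config.base === 'unknown' ||
  // Cast needed for the same narrowing reason as the @ts-expect-error above.
  (config as { format: string }).format === 'unknown';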
189 test.json Normal file
@@ -0,0 +1,189 @@
{
  "$defs": {
    "ClipVariantType": {
      "description": "Variant type.",
      "enum": ["large", "gigantic"],
      "title": "ClipVariantType",
      "type": "string"
    },
    "ModelSourceType": {
      "description": "Model source type.",
      "enum": ["path", "url", "hf_repo_id"],
      "title": "ModelSourceType",
      "type": "string"
    },
    "ModelType": {
      "description": "Model type.",
      "enum": [
        "onnx",
        "main",
        "vae",
        "lora",
        "control_lora",
        "controlnet",
        "embedding",
        "ip_adapter",
        "clip_vision",
        "clip_embed",
        "t2i_adapter",
        "t5_encoder",
        "spandrel_image_to_image",
        "siglip",
        "flux_redux",
        "llava_onevision",
        "video",
        "unknown"
      ],
      "title": "ModelType",
      "type": "string"
    },
    "ModelVariantType": {
      "description": "Variant type.",
      "enum": ["normal", "inpaint", "depth"],
      "title": "ModelVariantType",
      "type": "string"
    },
    "SubModelType": {
      "description": "Submodel type.",
      "enum": [
        "unet",
        "transformer",
        "text_encoder",
        "text_encoder_2",
        "text_encoder_3",
        "tokenizer",
        "tokenizer_2",
        "tokenizer_3",
        "vae",
        "vae_decoder",
        "vae_encoder",
        "scheduler",
        "safety_checker"
      ],
      "title": "SubModelType",
      "type": "string"
    },
    "SubmodelDefinition": {
      "properties": {
        "path_or_prefix": { "title": "Path Or Prefix", "type": "string" },
        "model_type": { "$ref": "#/$defs/ModelType" },
        "variant": {
          "anyOf": [
            { "$ref": "#/$defs/ModelVariantType" },
            { "$ref": "#/$defs/ClipVariantType" },
            { "type": "null" }
          ],
          "default": null,
          "title": "Variant"
        }
      },
      "required": ["path_or_prefix", "model_type"],
      "title": "SubmodelDefinition",
      "type": "object"
    }
  },
  "properties": {
    "key": {
      "description": "A unique key for this model.",
      "title": "Key",
      "type": "string"
    },
    "hash": {
      "description": "The hash of the model file(s).",
      "title": "Hash",
      "type": "string"
    },
    "path": {
      "description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.",
      "title": "Path",
      "type": "string"
    },
    "file_size": {
      "description": "The size of the model in bytes.",
      "title": "File Size",
      "type": "integer"
    },
    "name": {
      "description": "Name of the model.",
      "title": "Name",
      "type": "string"
    },
    "type": {
      "const": "unknown",
      "default": "unknown",
      "title": "Type",
      "type": "string"
    },
    "format": {
      "const": "unknown",
      "default": "unknown",
      "title": "Format",
      "type": "string"
    },
    "base": {
      "const": "any",
      "default": "any",
      "title": "Base",
      "type": "string"
    },
    "source": {
      "description": "The original source of the model (path, URL or repo_id).",
      "title": "Source",
      "type": "string"
    },
    "source_type": {
      "$ref": "#/$defs/ModelSourceType",
      "description": "The type of source"
    },
    "description": {
      "anyOf": [{ "type": "string" }, { "type": "null" }],
      "default": null,
      "description": "Model description",
      "title": "Description"
    },
    "source_api_response": {
      "anyOf": [{ "type": "string" }, { "type": "null" }],
      "default": null,
      "description": "The original API response from the source, as stringified JSON.",
      "title": "Source Api Response"
    },
    "cover_image": {
      "anyOf": [{ "type": "string" }, { "type": "null" }],
      "default": null,
      "description": "Url for image to preview model",
      "title": "Cover Image"
    },
    "submodels": {
      "anyOf": [
        {
          "additionalProperties": { "$ref": "#/$defs/SubmodelDefinition" },
          "propertyNames": { "$ref": "#/$defs/SubModelType" },
          "type": "object"
        },
        { "type": "null" }
      ],
      "default": null,
      "description": "Loadable submodels in this model",
      "title": "Submodels"
    },
    "usage_info": {
      "anyOf": [{ "type": "string" }, { "type": "null" }],
      "default": null,
      "description": "Usage information for this model",
      "title": "Usage Info"
    }
  },
  "required": [
    "hash",
    "path",
    "file_size",
    "name",
    "source",
    "source_type",
    "key",
    "type",
    "format"
  ],
  "title": "UnknownModelConfig",
  "type": "object"
}
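As a worked example (not part of this diff), a minimal object satisfying this schema needs only the fields in its `required` list; every value below is made up, and `type`/`format` must be the literal 'unknown' per the `const` constraints:

// Hypothetical minimal UnknownModelConfig payload.
const exampleUnknownModel = {
  key: '0b1c2d3e',
  hash: 'blake3:0123abcd',
  path: 'models/unidentified/mystery.safetensors',
  file_size: 123456789,
  name: 'mystery',
  source: '/downloads/mystery.safetensors',
  source_type: 'path',
  type: 'unknown',
  format: 'unknown',
} as const;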