refactor(ui): move model categorisation-ish logic to central location, simplify model manager models list

This commit is contained in:
psychedelicious
2025-09-18 17:54:52 +10:00
parent 4ae6c903e3
commit b26ab0b3f1
29 changed files with 636 additions and 481 deletions

View File

@@ -137,7 +137,7 @@ class ModelConfigBase(ABC, BaseModel):
@staticmethod
def json_schema_extra(schema: dict[str, Any]) -> None:
schema["required"].extend(["key", "type", "format"])
schema["required"].extend(["key", "base", "type", "format"])
model_config = ConfigDict(validate_assignment=True, json_schema_extra=json_schema_extra)
@@ -172,7 +172,8 @@ class ModelConfigBase(ABC, BaseModel):
super().__init_subclass__(**kwargs)
if issubclass(cls, LegacyProbeMixin):
ModelConfigBase.USING_LEGACY_PROBE.add(cls)
elif cls is not UnknownModelConfig:
# Cannot use `elif cls is not UnknownModelConfig` because UnknownModelConfig is not defined yet at this point
elif cls.__name__ != "UnknownModelConfig":
ModelConfigBase.USING_CLASSIFY_API.add(cls)
@staticmethod
@@ -274,16 +275,17 @@ class ModelConfigBase(ABC, BaseModel):
class UnknownModelConfig(ModelConfigBase):
base: Literal[BaseModelType.Any] = BaseModelType.Any
type: Literal[ModelType.Unknown] = ModelType.Unknown
format: Literal[ModelFormat.Unknown] = ModelFormat.Unknown
@classmethod
def matches(cls, *args, **kwargs) -> bool:
raise NotImplementedError("UnknownModelConfig cannot match anything")
def matches(cls, mod: ModelOnDisk) -> bool:
return False
@classmethod
def parse(cls, *args, **kwargs) -> dict[str, Any]:
raise NotImplementedError("UnknownModelConfig cannot parse anything")
def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
return {}
class CheckpointConfigBase(ABC, BaseModel):

View File

@@ -11,8 +11,8 @@ import {
selectCanvasSlice,
} from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/modelManagerV2/models';
import { modelSelected } from 'features/parameters/store/actions';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';

View File

@@ -37,7 +37,7 @@ import type { Logger } from 'roarr';
import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models';
import type { AnyModelConfig } from 'services/api/types';
import {
isCLIPEmbedModelConfig,
isCLIPEmbedModelConfigOrSubmodel,
isControlLayerModelConfig,
isControlNetModelConfig,
isFluxReduxModelConfig,
@@ -48,7 +48,7 @@ import {
isNonRefinerMainModelConfig,
isRefinerMainModelModelConfig,
isSpandrelImageToImageModelConfig,
isT5EncoderModelConfig,
isT5EncoderModelConfigOrSubmodel,
isVideoModelConfig,
} from 'services/api/types';
import type { JsonObject } from 'type-fest';
@@ -418,7 +418,7 @@ const handleTileControlNetModel: ModelHandler = (models, state, dispatch, log) =
const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
const selectedT5EncoderModel = state.params.t5EncoderModel;
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfigOrSubmodel(m));
// If the currently selected model is available, we don't need to do anything
if (selectedT5EncoderModel && t5EncoderModels.some((m) => m.key === selectedT5EncoderModel.key)) {
@@ -446,7 +446,7 @@ const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
const handleCLIPEmbedModels: ModelHandler = (models, state, dispatch, log) => {
const selectedCLIPEmbedModel = state.params.clipEmbedModel;
const CLIPEmbedModels = models.filter((m) => isCLIPEmbedModelConfig(m));
const CLIPEmbedModels = models.filter((m) => isCLIPEmbedModelConfigOrSubmodel(m));
// If the currently selected model is available, we don't need to do anything
if (selectedCLIPEmbedModel && CLIPEmbedModels.some((m) => m.key === selectedCLIPEmbedModel.key)) {

View File

@@ -17,7 +17,7 @@ import Konva from 'konva';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';
import { serializeError } from 'serialize-error';
import { buildSelectModelConfig } from 'services/api/hooks/modelsByType';
import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
import { isControlLayerModelConfig } from 'services/api/types';
import stableHash from 'stable-hash';
import type { Equals } from 'tsafe';
@@ -202,11 +202,19 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
createInitialFilterConfig = (): FilterConfig => {
if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
// If the parent is a control layer adapter, we should check if the model has a default filter and set it if so
const selectModelConfig = buildSelectModelConfig(
this.parent.state.controlAdapter.model.key,
isControlLayerModelConfig
);
const modelConfig = this.manager.stateApi.runSelector(selectModelConfig);
const key = this.parent.state.controlAdapter.model.key;
const modelConfig = this.manager.stateApi.runSelector((state) => {
const { data } = selectModelConfigsQuery(state);
if (!data) {
return null;
}
return (
modelConfigsAdapterSelectors
.selectAll(data)
.filter(isControlLayerModelConfig)
.find((m) => m.key === key) ?? null
);
});
// This always returns a filter
const filter = getFilterForModel(modelConfig) ?? IMAGE_FILTERS.canny_edge_detection;
return filter.buildDefaults();

View File

@@ -13,8 +13,8 @@ import { selectBboxOverlay } from 'features/controlLayers/store/canvasSettingsSl
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { selectBbox } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import Konva from 'konva';
import { atom } from 'nanostores';
import type { Logger } from 'roarr';

View File

@@ -35,8 +35,8 @@ import {
getScaledBoundingBoxDimensions,
} from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import type { IRect } from 'konva/lib/types';
import type { UndoableOptions } from 'redux-undo';

View File

@@ -25,14 +25,14 @@ import {
import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import {
API_BASE_MODELS,
CLIP_SKIP_MAP,
SUPPORTS_ASPECT_RATIO_BASE_MODELS,
SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS,
SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS,
SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS,
SUPPORTS_REF_IMAGES_BASE_MODELS,
SUPPORTS_SEED_BASE_MODELS,
} from 'features/parameters/types/constants';
} from 'features/modelManagerV2/models';
import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
import type {
ParameterCanvasCoherenceMode,
ParameterCFGRescaleMultiplier,

View File

@@ -6,8 +6,8 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import type { GroupStatusMap } from 'common/components/Picker/Picker';
import { loraAdded, selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import { ModelPicker } from 'features/parameters/components/ModelPicker';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useLoRAModels } from 'services/api/hooks/modelsByType';

View File

@@ -0,0 +1,214 @@
import type { BaseModelType } from 'features/nodes/types/common';
import type { AnyModelConfig } from 'services/api/types';
import {
isCLIPEmbedModelConfig,
isCLIPVisionModelConfig,
isControlLoRAModelConfig,
isControlNetModelConfig,
isFluxReduxModelConfig,
isIPAdapterModelConfig,
isLLaVAModelConfig,
isLoRAModelConfig,
isNonRefinerMainModelConfig,
isRefinerMainModelModelConfig,
isSigLipModelConfig,
isSpandrelImageToImageModelConfig,
isT2IAdapterModelConfig,
isT5EncoderModelConfig,
isTIModelConfig,
isUnknownModelConfig,
isVAEModelConfig,
} from 'services/api/types';
/**
 * A display category for the model manager list: the i18n key used for the
 * category heading plus a predicate selecting the model configs that belong
 * to the category.
 */
type ModelCategoryData = {
  i18nKey: string;
  filter: (config: AnyModelConfig) => boolean;
};
// Small constructor so each category entry reads as a single line.
const category = (i18nKey: string, filter: ModelCategoryData['filter']): ModelCategoryData => ({ i18nKey, filter });

/**
 * All model manager list categories, keyed by category id.
 * The insertion order of this object is the order in which the categories are
 * iterated (and thus rendered) by consumers that use `Object.values`.
 */
export const MODEL_CATEGORIES: Record<string, ModelCategoryData> = {
  main: category('model_manager.category.main_models', isNonRefinerMainModelConfig),
  refiner: category('model_manager.category.refiner_models', isRefinerMainModelModelConfig),
  lora: category('model_manager.category.lora_models', isLoRAModelConfig),
  embedding: category('model_manager.category.embedding_models', isTIModelConfig),
  controlnet: category('model_manager.category.controlnet_models', isControlNetModelConfig),
  t2i_adapter: category('model_manager.category.t2i_adapter_models', isT2IAdapterModelConfig),
  t5_encoder: category('model_manager.category.t5_encoder_models', isT5EncoderModelConfig),
  control_lora: category('model_manager.category.control_lora_models', isControlLoRAModelConfig),
  clip_embed: category('model_manager.category.clip_embed_models', isCLIPEmbedModelConfig),
  spandrel: category('model_manager.category.spandrel_image_to_image_models', isSpandrelImageToImageModelConfig),
  ip_adapter: category('model_manager.category.ip_adapter_models', isIPAdapterModelConfig),
  vae: category('model_manager.category.vae_models', isVAEModelConfig),
  clip_vision: category('model_manager.category.clip_vision_models', isCLIPVisionModelConfig),
  siglip: category('model_manager.category.siglip_models', isSigLipModelConfig),
  flux_redux: category('model_manager.category.flux_redux_models', isFluxReduxModelConfig),
  llava_one_vision: category('model_manager.category.llava_one_vision_models', isLLaVAModelConfig),
  unknown: category('model_manager.category.unknown_models', isUnknownModelConfig),
};
/**
 * Mapping of model base to the color used for its badge in the UI.
 * Values are UI-library color scheme names (e.g. passed to a Badge's
 * `colorScheme` prop), not CSS color values.
 */
export const MODEL_BASE_TO_COLOR: Record<BaseModelType, string> = {
  any: 'base',
  'sd-1': 'green',
  'sd-2': 'teal',
  'sd-3': 'purple',
  sdxl: 'invokeBlue',
  'sdxl-refiner': 'invokeBlue',
  flux: 'gold',
  cogview4: 'red',
  imagen3: 'pink',
  imagen4: 'pink',
  'chatgpt-4o': 'pink',
  'flux-kontext': 'pink',
  'gemini-2.5': 'pink',
  veo3: 'purple',
  runway: 'green',
};
/**
 * Mapping of model base to its full human-readable display name
 * (used e.g. for combobox option labels and group headings).
 */
export const MODEL_BASE_TO_LONG_NAME: Record<BaseModelType, string> = {
  any: 'Any',
  'sd-1': 'Stable Diffusion 1.x',
  'sd-2': 'Stable Diffusion 2.x',
  'sd-3': 'Stable Diffusion 3.x',
  sdxl: 'Stable Diffusion XL',
  'sdxl-refiner': 'Stable Diffusion XL Refiner',
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
  'gemini-2.5': 'Gemini 2.5',
  veo3: 'Veo3',
  runway: 'Runway',
};
/**
 * Mapping of model base to an abbreviated human-readable name
 * (used where space is tight, e.g. badges).
 */
export const MODEL_BASE_TO_SHORT_NAME: Record<BaseModelType, string> = {
  any: 'Any',
  'sd-1': 'SD1.X',
  'sd-2': 'SD2.X',
  'sd-3': 'SD3.X',
  sdxl: 'SDXL',
  'sdxl-refiner': 'SDXLR',
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
  'gemini-2.5': 'Gemini 2.5',
  veo3: 'Veo3',
  runway: 'Runway',
};
/**
 * Base models whose generations are performed via external API requests
 * rather than locally (grouped as "External API" in model pickers).
 */
export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];
/** Base models that support a seed parameter. */
export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4'];
/** Base models that support the optimized denoising setting. */
export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3'];
/** Base models that support reference images. */
export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sdxl',
  'flux',
  'flux-kontext',
  'chatgpt-4o',
  'gemini-2.5',
];
/** Base models that support a negative prompt. */
export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sdxl',
  'cogview4',
  'sd-3',
  'imagen3',
  'imagen4',
];
/** Base models that support setting exact pixel dimensions for output. */
export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sd-3',
  'sdxl',
  'flux',
  'cogview4',
];
/** Base models that support aspect ratio selection. */
export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [
  'sd-1',
  'sd-2',
  'sd-3',
  'sdxl',
  'flux',
  'cogview4',
  'imagen3',
  'imagen4',
  'flux-kontext',
  'chatgpt-4o',
];
/**
 * Base models that generate video.
 * NOTE(review): this list and the one below are untyped (inferred string[]),
 * unlike the `BaseModelType[]` lists above — consider annotating them as
 * `BaseModelType[]` for consistency and typo-safety.
 */
export const VIDEO_BASE_MODELS = ['veo3', 'runway'];
/** Video base models that require a starting frame to be supplied. */
export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway'];

View File

@@ -1,5 +1,5 @@
import { Badge } from '@invoke-ai/ui-library';
import { MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import { MODEL_BASE_TO_COLOR, MODEL_BASE_TO_SHORT_NAME } from 'features/modelManagerV2/models';
import { memo } from 'react';
import type { BaseModelType } from 'services/api/types';
@@ -7,28 +7,10 @@ type Props = {
base: BaseModelType;
};
export const BASE_COLOR_MAP: Record<BaseModelType, string> = {
any: 'base',
'sd-1': 'green',
'sd-2': 'teal',
'sd-3': 'purple',
sdxl: 'invokeBlue',
'sdxl-refiner': 'invokeBlue',
flux: 'gold',
cogview4: 'red',
imagen3: 'pink',
imagen4: 'pink',
'chatgpt-4o': 'pink',
'flux-kontext': 'pink',
'gemini-2.5': 'pink',
veo3: 'purple',
runway: 'green',
};
const ModelBaseBadge = ({ base }: Props) => {
return (
<Badge flexGrow={0} flexShrink={0} colorScheme={BASE_COLOR_MAP[base]} variant="subtle" h="min-content">
{MODEL_TYPE_SHORT_MAP[base]}
<Badge flexGrow={0} flexShrink={0} colorScheme={MODEL_BASE_TO_COLOR[base]} variant="subtle" h="min-content">
{MODEL_BASE_TO_SHORT_NAME[base]}
</Badge>
);
};

View File

@@ -19,6 +19,7 @@ const FORMAT_NAME_MAP: Record<AnyModelConfig['format'], string> = {
gguf_quantized: 'gguf',
api: 'api',
omi: 'omi',
unknown: 'unknown',
};
const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = {
@@ -34,6 +35,7 @@ const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = {
bnb_quantized_nf4b: 'base',
gguf_quantized: 'base',
api: 'base',
unknown: 'red',
};
const ModelFormatBadge = ({ format }: Props) => {

View File

@@ -1,6 +1,8 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { MODEL_CATEGORIES } from 'features/modelManagerV2/models';
import {
type FilterableModelType,
selectFilteredModelType,
@@ -8,274 +10,50 @@ import {
} from 'features/modelManagerV2/store/modelManagerV2Slice';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import {
useCLIPEmbedModels,
useCLIPVisionModels,
useControlLoRAModel,
useControlNetModels,
useEmbeddingModels,
useFluxReduxModels,
useIPAdapterModels,
useLLaVAModels,
useLoRAModels,
useMainModels,
useRefinerModels,
useSigLipModels,
useSpandrelImageToImageModels,
useT2IAdapterModels,
useT5EncoderModels,
useVAEModels,
} from 'services/api/hooks/modelsByType';
import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/api/endpoints/models';
import type { AnyModelConfig } from 'services/api/types';
import { FetchingModelsLoader } from './FetchingModelsLoader';
import { ModelListWrapper } from './ModelListWrapper';
const log = logger('models');
const ModelList = () => {
const filteredModelType = useAppSelector(selectFilteredModelType);
const searchTerm = useAppSelector(selectSearchTerm);
const { t } = useTranslation();
const [mainModels, { isLoading: isLoadingMainModels }] = useMainModels();
const filteredMainModels = useMemo(
() => modelsFilter(mainModels, searchTerm, filteredModelType),
[mainModels, searchTerm, filteredModelType]
);
const { data, isLoading } = useGetModelConfigsQuery();
const [refinerModels, { isLoading: isLoadingRefinerModels }] = useRefinerModels();
const filteredRefinerModels = useMemo(
() => modelsFilter(refinerModels, searchTerm, filteredModelType),
[refinerModels, searchTerm, filteredModelType]
);
const [loraModels, { isLoading: isLoadingLoRAModels }] = useLoRAModels();
const filteredLoRAModels = useMemo(
() => modelsFilter(loraModels, searchTerm, filteredModelType),
[loraModels, searchTerm, filteredModelType]
);
const [embeddingModels, { isLoading: isLoadingEmbeddingModels }] = useEmbeddingModels();
const filteredEmbeddingModels = useMemo(
() => modelsFilter(embeddingModels, searchTerm, filteredModelType),
[embeddingModels, searchTerm, filteredModelType]
);
const [controlNetModels, { isLoading: isLoadingControlNetModels }] = useControlNetModels();
const filteredControlNetModels = useMemo(
() => modelsFilter(controlNetModels, searchTerm, filteredModelType),
[controlNetModels, searchTerm, filteredModelType]
);
const [t2iAdapterModels, { isLoading: isLoadingT2IAdapterModels }] = useT2IAdapterModels();
const filteredT2IAdapterModels = useMemo(
() => modelsFilter(t2iAdapterModels, searchTerm, filteredModelType),
[t2iAdapterModels, searchTerm, filteredModelType]
);
const [ipAdapterModels, { isLoading: isLoadingIPAdapterModels }] = useIPAdapterModels();
const filteredIPAdapterModels = useMemo(
() => modelsFilter(ipAdapterModels, searchTerm, filteredModelType),
[ipAdapterModels, searchTerm, filteredModelType]
);
const [clipVisionModels, { isLoading: isLoadingCLIPVisionModels }] = useCLIPVisionModels();
const filteredCLIPVisionModels = useMemo(
() => modelsFilter(clipVisionModels, searchTerm, filteredModelType),
[clipVisionModels, searchTerm, filteredModelType]
);
const [vaeModels, { isLoading: isLoadingVAEModels }] = useVAEModels({ excludeSubmodels: true });
const filteredVAEModels = useMemo(
() => modelsFilter(vaeModels, searchTerm, filteredModelType),
[vaeModels, searchTerm, filteredModelType]
);
const [t5EncoderModels, { isLoading: isLoadingT5EncoderModels }] = useT5EncoderModels({ excludeSubmodels: true });
const filteredT5EncoderModels = useMemo(
() => modelsFilter(t5EncoderModels, searchTerm, filteredModelType),
[t5EncoderModels, searchTerm, filteredModelType]
);
const [controlLoRAModels, { isLoading: isLoadingControlLoRAModels }] = useControlLoRAModel();
const filteredControlLoRAModels = useMemo(
() => modelsFilter(controlLoRAModels, searchTerm, filteredModelType),
[controlLoRAModels, searchTerm, filteredModelType]
);
const [clipEmbedModels, { isLoading: isLoadingClipEmbedModels }] = useCLIPEmbedModels({ excludeSubmodels: true });
const filteredClipEmbedModels = useMemo(
() => modelsFilter(clipEmbedModels, searchTerm, filteredModelType),
[clipEmbedModels, searchTerm, filteredModelType]
);
const [spandrelImageToImageModels, { isLoading: isLoadingSpandrelImageToImageModels }] =
useSpandrelImageToImageModels();
const filteredSpandrelImageToImageModels = useMemo(
() => modelsFilter(spandrelImageToImageModels, searchTerm, filteredModelType),
[spandrelImageToImageModels, searchTerm, filteredModelType]
);
const [sigLipModels, { isLoading: isLoadingSigLipModels }] = useSigLipModels();
const filteredSigLipModels = useMemo(
() => modelsFilter(sigLipModels, searchTerm, filteredModelType),
[sigLipModels, searchTerm, filteredModelType]
);
const [fluxReduxModels, { isLoading: isLoadingFluxReduxModels }] = useFluxReduxModels();
const filteredFluxReduxModels = useMemo(
() => modelsFilter(fluxReduxModels, searchTerm, filteredModelType),
[fluxReduxModels, searchTerm, filteredModelType]
);
const [llavaOneVisionModels, { isLoading: isLoadingLlavaOneVisionModels }] = useLLaVAModels();
const filteredLlavaOneVisionModels = useMemo(
() => modelsFilter(llavaOneVisionModels, searchTerm, filteredModelType),
[llavaOneVisionModels, searchTerm, filteredModelType]
);
const totalFilteredModels = useMemo(() => {
return (
filteredMainModels.length +
filteredRefinerModels.length +
filteredLoRAModels.length +
filteredEmbeddingModels.length +
filteredControlNetModels.length +
filteredT2IAdapterModels.length +
filteredIPAdapterModels.length +
filteredCLIPVisionModels.length +
filteredVAEModels.length +
filteredSpandrelImageToImageModels.length +
filteredSigLipModels.length +
filteredFluxReduxModels.length +
t5EncoderModels.length +
clipEmbedModels.length +
controlLoRAModels.length
);
}, [
filteredControlNetModels.length,
filteredEmbeddingModels.length,
filteredIPAdapterModels.length,
filteredCLIPVisionModels.length,
filteredLoRAModels.length,
filteredMainModels.length,
filteredRefinerModels.length,
filteredT2IAdapterModels.length,
filteredVAEModels.length,
filteredSpandrelImageToImageModels.length,
filteredSigLipModels.length,
filteredFluxReduxModels.length,
t5EncoderModels.length,
clipEmbedModels.length,
controlLoRAModels.length,
]);
const models = useMemo(() => {
const modelConfigs = modelConfigsAdapterSelectors.selectAll(data ?? { ids: [], entities: {} });
const baseFilteredModelConfigs = modelsFilter(modelConfigs, searchTerm, filteredModelType);
const byCategory: { i18nKey: string; configs: AnyModelConfig[] }[] = [];
const total = baseFilteredModelConfigs.length;
let renderedTotal = 0;
for (const { i18nKey, filter } of Object.values(MODEL_CATEGORIES)) {
const configs = baseFilteredModelConfigs.filter(filter);
renderedTotal += configs.length;
byCategory.push({ i18nKey, configs });
}
if (renderedTotal !== total) {
const ctx = { total, renderedTotal, difference: total - renderedTotal };
log.warn(
ctx,
`ModelList: Not all models were categorized - ensure all possible models are covered in MODEL_CATEGORIES`
);
}
return { total, byCategory };
}, [data, filteredModelType, searchTerm]);
return (
<ScrollableContent>
<Flex flexDirection="column" w="full" h="full" gap={4}>
{/* Main Model List */}
{isLoadingMainModels && <FetchingModelsLoader loadingMessage="Loading Main Models..." />}
{!isLoadingMainModels && filteredMainModels.length > 0 && (
<ModelListWrapper title={t('modelManager.main')} modelList={filteredMainModels} key="main" />
)}
{/* Refiner Model List */}
{isLoadingRefinerModels && <FetchingModelsLoader loadingMessage="Loading Refiner Models..." />}
{!isLoadingRefinerModels && filteredRefinerModels.length > 0 && (
<ModelListWrapper title={t('sdxl.refiner')} modelList={filteredRefinerModels} key="refiner" />
)}
{/* LoRAs List */}
{isLoadingLoRAModels && <FetchingModelsLoader loadingMessage="Loading LoRAs..." />}
{!isLoadingLoRAModels && filteredLoRAModels.length > 0 && (
<ModelListWrapper title={t('modelManager.loraModels')} modelList={filteredLoRAModels} key="loras" />
)}
{/* TI List */}
{isLoadingEmbeddingModels && <FetchingModelsLoader loadingMessage="Loading Textual Inversions..." />}
{!isLoadingEmbeddingModels && filteredEmbeddingModels.length > 0 && (
<ModelListWrapper
title={t('modelManager.textualInversions')}
modelList={filteredEmbeddingModels}
key="textual-inversions"
/>
)}
{/* VAE List */}
{isLoadingVAEModels && <FetchingModelsLoader loadingMessage="Loading VAEs..." />}
{!isLoadingVAEModels && filteredVAEModels.length > 0 && (
<ModelListWrapper title="VAE" modelList={filteredVAEModels} key="vae" />
)}
{/* Controlnet List */}
{isLoadingControlNetModels && <FetchingModelsLoader loadingMessage="Loading ControlNets..." />}
{!isLoadingControlNetModels && filteredControlNetModels.length > 0 && (
<ModelListWrapper title="ControlNet" modelList={filteredControlNetModels} key="controlnets" />
)}
{/* IP Adapter List */}
{isLoadingIPAdapterModels && <FetchingModelsLoader loadingMessage="Loading IP Adapters..." />}
{!isLoadingIPAdapterModels && filteredIPAdapterModels.length > 0 && (
<ModelListWrapper title={t('common.ipAdapter')} modelList={filteredIPAdapterModels} key="ip-adapters" />
)}
{/* CLIP Vision List */}
{isLoadingCLIPVisionModels && <FetchingModelsLoader loadingMessage="Loading CLIP Vision Models..." />}
{!isLoadingCLIPVisionModels && filteredCLIPVisionModels.length > 0 && (
<ModelListWrapper title="CLIP Vision" modelList={filteredCLIPVisionModels} key="clip-vision" />
)}
{/* T2I Adapters List */}
{isLoadingT2IAdapterModels && <FetchingModelsLoader loadingMessage="Loading T2I Adapters..." />}
{!isLoadingT2IAdapterModels && filteredT2IAdapterModels.length > 0 && (
<ModelListWrapper title={t('common.t2iAdapter')} modelList={filteredT2IAdapterModels} key="t2i-adapters" />
)}
{/* T5 Encoders List */}
{isLoadingT5EncoderModels && <FetchingModelsLoader loadingMessage="Loading T5 Encoder Models..." />}
{!isLoadingT5EncoderModels && filteredT5EncoderModels.length > 0 && (
<ModelListWrapper title={t('modelManager.t5Encoder')} modelList={filteredT5EncoderModels} key="t5-encoder" />
)}
{/* Control Lora List */}
{isLoadingControlLoRAModels && <FetchingModelsLoader loadingMessage="Loading Control Loras..." />}
{!isLoadingControlLoRAModels && filteredControlLoRAModels.length > 0 && (
<ModelListWrapper
title={t('modelManager.controlLora')}
modelList={filteredControlLoRAModels}
key="control-lora"
/>
)}
{/* Clip Embed List */}
{isLoadingClipEmbedModels && <FetchingModelsLoader loadingMessage="Loading Clip Embed Models..." />}
{!isLoadingClipEmbedModels && filteredClipEmbedModels.length > 0 && (
<ModelListWrapper title={t('modelManager.clipEmbed')} modelList={filteredClipEmbedModels} key="clip-embed" />
)}
{/* LLaVA OneVision List */}
{isLoadingLlavaOneVisionModels && <FetchingModelsLoader loadingMessage="Loading LLaVA OneVision Models..." />}
{!isLoadingLlavaOneVisionModels && filteredLlavaOneVisionModels.length > 0 && (
<ModelListWrapper
title={t('modelManager.llavaOnevision')}
modelList={filteredLlavaOneVisionModels}
key="llava-onevision"
/>
)}
{/* Spandrel Image to Image List */}
{isLoadingSpandrelImageToImageModels && (
<FetchingModelsLoader loadingMessage="Loading Image-to-Image Models..." />
)}
{!isLoadingSpandrelImageToImageModels && filteredSpandrelImageToImageModels.length > 0 && (
<ModelListWrapper
title={t('modelManager.spandrelImageToImage')}
modelList={filteredSpandrelImageToImageModels}
key="spandrel-image-to-image"
/>
)}
{/* SigLIP List */}
{isLoadingSigLipModels && <FetchingModelsLoader loadingMessage="Loading SigLIP Models..." />}
{!isLoadingSigLipModels && filteredSigLipModels.length > 0 && (
<ModelListWrapper title={t('modelManager.sigLip')} modelList={filteredSigLipModels} key="sig-lip" />
)}
{/* Flux Redux List */}
{isLoadingFluxReduxModels && <FetchingModelsLoader loadingMessage="Loading Flux Redux Models..." />}
{!isLoadingFluxReduxModels && filteredFluxReduxModels.length > 0 && (
<ModelListWrapper title={t('modelManager.fluxRedux')} modelList={filteredFluxReduxModels} key="flux-redux" />
)}
{totalFilteredModels === 0 && (
{isLoading && <FetchingModelsLoader loadingMessage="Loading..." />}
{models.byCategory.map(({ i18nKey, configs }) => (
<ModelListWrapper key={i18nKey} title={t(i18nKey)} modelList={configs} />
))}
{!isLoading && models.total === 0 && (
<Flex w="full" h="full" alignItems="center" justifyContent="center">
<Text>{t('modelManager.noMatchingModels')}</Text>
</Flex>
@@ -293,7 +71,13 @@ const modelsFilter = <T extends AnyModelConfig>(
filteredModelType: FilterableModelType | null
): T[] => {
return data.filter((model) => {
const matchesFilter = model.name.toLowerCase().includes(nameFilter.toLowerCase());
const matchesFilter =
model.name.toLowerCase().includes(nameFilter.toLowerCase()) ||
model.base.toLowerCase().includes(nameFilter.toLowerCase()) ||
model.type.toLowerCase().includes(nameFilter.toLowerCase()) ||
model.description?.toLowerCase().includes(nameFilter.toLowerCase()) ||
model.format.toLowerCase().includes(nameFilter.toLowerCase());
const matchesType = getMatchesType(model, filteredModelType);
return matchesFilter && matchesType;

View File

@@ -25,6 +25,9 @@ const contentSx = {
export const ModelListWrapper = memo((props: ModelListWrapperProps) => {
const { title, modelList } = props;
if (modelList.length === 0) {
return null;
}
return (
<StickyScrollable title={title} contentSx={contentSx} headingSx={headingSx}>
{modelList.map((model) => (

View File

@@ -29,6 +29,7 @@ export const ModelTypeFilter = memo(() => {
flux_redux: t('modelManager.fluxRedux'),
llava_onevision: t('modelManager.llavaOnevision'),
video: t('modelManager.video'),
unknown: t('modelManager.unknown'),
}),
[t]
);

View File

@@ -1,19 +1,19 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox } from '@invoke-ai/ui-library';
import { typedMemo } from 'common/util/typedMemo';
import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
import { MODEL_BASE_TO_LONG_NAME } from 'features/modelManagerV2/models';
import { useCallback, useMemo } from 'react';
import type { Control } from 'react-hook-form';
import { useController } from 'react-hook-form';
import type { UpdateModelArg } from 'services/api/endpoints/models';
const options: ComboboxOption[] = [
{ value: 'sd-1', label: MODEL_TYPE_MAP['sd-1'] },
{ value: 'sd-2', label: MODEL_TYPE_MAP['sd-2'] },
{ value: 'sd-3', label: MODEL_TYPE_MAP['sd-3'] },
{ value: 'flux', label: MODEL_TYPE_MAP['flux'] },
{ value: 'sdxl', label: MODEL_TYPE_MAP['sdxl'] },
{ value: 'sdxl-refiner', label: MODEL_TYPE_MAP['sdxl-refiner'] },
{ value: 'sd-1', label: MODEL_BASE_TO_LONG_NAME['sd-1'] },
{ value: 'sd-2', label: MODEL_BASE_TO_LONG_NAME['sd-2'] },
{ value: 'sd-3', label: MODEL_BASE_TO_LONG_NAME['sd-3'] },
{ value: 'flux', label: MODEL_BASE_TO_LONG_NAME['flux'] },
{ value: 'sdxl', label: MODEL_BASE_TO_LONG_NAME['sdxl'] },
{ value: 'sdxl-refiner', label: MODEL_BASE_TO_LONG_NAME['sdxl-refiner'] },
];
type Props = {

View File

@@ -126,6 +126,7 @@ export const zModelType = z.enum([
'siglip',
'flux_redux',
'video',
'unknown',
]);
const zSubModelType = z.enum([
'unet',
@@ -161,6 +162,7 @@ export const zModelFormat = z.enum([
'bnb_quantized_nf4b',
'gguf_quantized',
'api',
'unknown',
]);
export const zModelIdentifierField = z.object({

View File

@@ -2,8 +2,8 @@ import type { RootState } from 'app/store/store';
import { generateSeeds } from 'common/util/generateSeeds';
import { range } from 'es-toolkit/compat';
import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { API_BASE_MODELS, VIDEO_BASE_MODELS } from 'features/modelManagerV2/models';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { API_BASE_MODELS, VIDEO_BASE_MODELS } from 'features/parameters/types/constants';
import type { components } from 'services/api/schema';
import type { BaseModelType, Batch, EnqueueBatchArg, Invocation } from 'services/api/types';

View File

@@ -7,7 +7,7 @@ import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useCLIPEmbedModels } from 'services/api/hooks/modelsByType';
import type { CLIPGEmbedModelConfig } from 'services/api/types';
import { isCLIPGEmbedModelConfig } from 'services/api/types';
import { isCLIPGEmbedModelConfigOrSubmodel } from 'services/api/types';
const ParamCLIPEmbedModelSelect = () => {
const dispatch = useAppDispatch();
@@ -25,7 +25,7 @@ const ParamCLIPEmbedModelSelect = () => {
);
const { options, value, onChange, noOptionsMessage } = useModelCombobox({
modelConfigs: modelConfigs.filter((config) => isCLIPGEmbedModelConfig(config)),
modelConfigs: modelConfigs.filter((config) => isCLIPGEmbedModelConfigOrSubmodel(config)),
onChange: _onChange,
selectedModel: clipEmbedModel,
isLoading,

View File

@@ -7,7 +7,7 @@ import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useCLIPEmbedModels } from 'services/api/hooks/modelsByType';
import type { CLIPLEmbedModelConfig } from 'services/api/types';
import { isCLIPLEmbedModelConfig } from 'services/api/types';
import { isCLIPLEmbedModelConfigOrSubmodel } from 'services/api/types';
const ParamCLIPEmbedModelSelect = () => {
const dispatch = useAppDispatch();
@@ -25,7 +25,7 @@ const ParamCLIPEmbedModelSelect = () => {
);
const { options, value, onChange, noOptionsMessage } = useModelCombobox({
modelConfigs: modelConfigs.filter((config) => isCLIPLEmbedModelConfig(config)),
modelConfigs: modelConfigs.filter((config) => isCLIPLEmbedModelConfigOrSubmodel(config)),
onChange: _onChange,
selectedModel: clipEmbedModel,
isLoading,

View File

@@ -24,11 +24,15 @@ import { typedMemo } from 'common/util/typedMemo';
import { uniq } from 'es-toolkit/compat';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import {
API_BASE_MODELS,
MODEL_BASE_TO_COLOR,
MODEL_BASE_TO_LONG_NAME,
MODEL_BASE_TO_SHORT_NAME,
} from 'features/modelManagerV2/models';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { BASE_COLOR_MAP } from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge';
import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
import { API_BASE_MODELS, MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -123,21 +127,21 @@ const getGroupNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
if (API_BASE_MODELS.includes(modelConfig.base)) {
return 'External API';
}
return MODEL_TYPE_MAP[modelConfig.base];
return MODEL_BASE_TO_LONG_NAME[modelConfig.base];
};
const getGroupShortNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
if (API_BASE_MODELS.includes(modelConfig.base)) {
return 'api';
}
return MODEL_TYPE_SHORT_MAP[modelConfig.base];
return MODEL_BASE_TO_SHORT_NAME[modelConfig.base];
};
const getGroupColorSchemeFromModelConfig = (modelConfig: AnyModelConfig): string => {
if (API_BASE_MODELS.includes(modelConfig.base)) {
return 'pink';
}
return BASE_COLOR_MAP[modelConfig.base];
return MODEL_BASE_TO_COLOR[modelConfig.base];
};
const relatedModelKeysQueryOptions = {

View File

@@ -21,9 +21,9 @@ import {
zVideoDuration,
zVideoResolution,
} from 'features/controlLayers/store/types';
import { REQUIRES_STARTING_FRAME_BASE_MODELS } from 'features/modelManagerV2/models';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { REQUIRES_STARTING_FRAME_BASE_MODELS } from 'features/parameters/types/constants';
import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
import { isVideoModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

View File

@@ -1,48 +1,6 @@
import type { ComboboxOption } from '@invoke-ai/ui-library';
import type { BaseModelType } from 'services/api/types';
/**
* Mapping of base model to human readable name
*/
export const MODEL_TYPE_MAP: Record<BaseModelType, string> = {
any: 'Any',
'sd-1': 'Stable Diffusion 1.x',
'sd-2': 'Stable Diffusion 2.x',
'sd-3': 'Stable Diffusion 3.x',
sdxl: 'Stable Diffusion XL',
'sdxl-refiner': 'Stable Diffusion XL Refiner',
flux: 'FLUX',
cogview4: 'CogView4',
imagen3: 'Imagen3',
imagen4: 'Imagen4',
'chatgpt-4o': 'ChatGPT 4o',
'flux-kontext': 'Flux Kontext',
'gemini-2.5': 'Gemini 2.5',
veo3: 'Veo3',
runway: 'Runway',
};
/**
* Mapping of base model to (short) human readable name
*/
export const MODEL_TYPE_SHORT_MAP: Record<BaseModelType, string> = {
any: 'Any',
'sd-1': 'SD1.X',
'sd-2': 'SD2.X',
'sd-3': 'SD3.X',
sdxl: 'SDXL',
'sdxl-refiner': 'SDXLR',
flux: 'FLUX',
cogview4: 'CogView4',
imagen3: 'Imagen3',
imagen4: 'Imagen4',
'chatgpt-4o': 'ChatGPT 4o',
'flux-kontext': 'Flux Kontext',
'gemini-2.5': 'Gemini 2.5',
veo3: 'Veo3',
runway: 'Runway',
};
/**
* Mapping of base model to CLIP skip parameter constraints
*/
@@ -136,57 +94,3 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [
{ value: 'unipc', label: 'UniPC' },
{ value: 'unipc_k', label: 'UniPC Karras' },
];
/**
* List of base models that make API requests
*/
export const API_BASE_MODELS: BaseModelType[] = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext', 'gemini-2.5'];
export const SUPPORTS_SEED_BASE_MODELS: BaseModelType[] = ['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4'];
export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3'];
export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = [
'sd-1',
'sdxl',
'flux',
'flux-kontext',
'chatgpt-4o',
'gemini-2.5',
];
export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
'sd-1',
'sd-2',
'sdxl',
'cogview4',
'sd-3',
'imagen3',
'imagen4',
];
export const SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS: BaseModelType[] = [
'sd-1',
'sd-2',
'sd-3',
'sdxl',
'flux',
'cogview4',
];
export const SUPPORTS_ASPECT_RATIO_BASE_MODELS: BaseModelType[] = [
'sd-1',
'sd-2',
'sd-3',
'sdxl',
'flux',
'cogview4',
'imagen3',
'imagen4',
'flux-kontext',
'chatgpt-4o',
];
export const VIDEO_BASE_MODELS = ['veo3', 'runway'];
export const REQUIRES_STARTING_FRAME_BASE_MODELS = ['runway'];

View File

@@ -24,6 +24,7 @@ import {
import type { DynamicPromptsState } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/modelManagerV2/models';
import { $isInPublishFlow } from 'features/nodes/components/sidePanel/workflow/publish';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
@@ -37,7 +38,6 @@ import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
import { selectVideoSlice, type VideoState } from 'features/parameters/store/videoSlice';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { getGridSize } from 'features/parameters/util/optimalDimension';
import { promptExpansionApi, type PromptExpansionRequestState } from 'features/prompt/PromptExpansion/state';

View File

@@ -12,12 +12,12 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { LoRAList } from 'features/lora/components/LoRAList';
import LoRASelect from 'features/lora/components/LoRASelect';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import ParamCFGScale from 'features/parameters/components/Core/ParamCFGScale';
import ParamGuidance from 'features/parameters/components/Core/ParamGuidance';
import ParamScheduler from 'features/parameters/components/Core/ParamScheduler';
import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import { DisabledModelWarning } from 'features/parameters/components/MainModel/DisabledModelWarning';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { MainModelPicker } from 'features/settingsAccordions/components/GenerationSettingsAccordion/MainModelPicker';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';

View File

@@ -7,12 +7,12 @@ import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectIsApiBaseModel, selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import { LoRAList } from 'features/lora/components/LoRAList';
import LoRASelect from 'features/lora/components/LoRASelect';
import { API_BASE_MODELS } from 'features/modelManagerV2/models';
import ParamGuidance from 'features/parameters/components/Core/ParamGuidance';
import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import { DisabledModelWarning } from 'features/parameters/components/MainModel/DisabledModelWarning';
import ParamUpscaleCFGScale from 'features/parameters/components/Upscale/ParamUpscaleCFGScale';
import ParamUpscaleScheduler from 'features/parameters/components/Upscale/ParamUpscaleScheduler';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { MainModelPicker } from 'features/settingsAccordions/components/GenerationSettingsAccordion/MainModelPicker';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';

View File

@@ -1,4 +1,4 @@
import { createSelector, type Selector } from '@reduxjs/toolkit';
import type { Selector } from '@reduxjs/toolkit';
import { EMPTY_ARRAY } from 'app/store/constants';
import type { RootState } from 'app/store/store';
import { useMemo } from 'react';
@@ -10,10 +10,8 @@ import {
import type { AnyModelConfig } from 'services/api/types';
import {
isChatGPT4oModelConfig,
isCLIPEmbedModelConfig,
isCLIPVisionModelConfig,
isCLIPEmbedModelConfigOrSubmodel,
isControlLayerModelConfig,
isControlLoRAModelConfig,
isControlNetModelConfig,
isFluxKontextApiModelConfig,
isFluxKontextModelConfig,
@@ -21,16 +19,13 @@ import {
isFluxVAEModelConfig,
isGemini2_5ModelConfig,
isIPAdapterModelConfig,
isLLaVAModelConfig,
isLoRAModelConfig,
isNonRefinerMainModelConfig,
isRefinerMainModelModelConfig,
isSigLipModelConfig,
isSpandrelImageToImageModelConfig,
isT2IAdapterModelConfig,
isT5EncoderModelConfig,
isT5EncoderModelConfigOrSubmodel,
isTIModelConfig,
isVAEModelConfig,
isVAEModelConfigOrSubmodel,
isVideoModelConfig,
} from 'services/api/types';
@@ -59,23 +54,18 @@ const buildModelsHook =
export const useMainModels = buildModelsHook(isNonRefinerMainModelConfig);
export const useRefinerModels = buildModelsHook(isRefinerMainModelModelConfig);
export const useLoRAModels = buildModelsHook(isLoRAModelConfig);
export const useControlLoRAModel = buildModelsHook(isControlLoRAModelConfig);
export const useControlLayerModels = buildModelsHook(isControlLayerModelConfig);
export const useControlNetModels = buildModelsHook(isControlNetModelConfig);
export const useT2IAdapterModels = buildModelsHook(isT2IAdapterModelConfig);
export const useT5EncoderModels = (args?: ModelHookArgs) =>
buildModelsHook(isT5EncoderModelConfig, args?.excludeSubmodels)();
buildModelsHook(isT5EncoderModelConfigOrSubmodel, args?.excludeSubmodels)();
export const useCLIPEmbedModels = (args?: ModelHookArgs) =>
buildModelsHook(isCLIPEmbedModelConfig, args?.excludeSubmodels)();
buildModelsHook(isCLIPEmbedModelConfigOrSubmodel, args?.excludeSubmodels)();
export const useSpandrelImageToImageModels = buildModelsHook(isSpandrelImageToImageModelConfig);
export const useIPAdapterModels = buildModelsHook(isIPAdapterModelConfig);
export const useEmbeddingModels = buildModelsHook(isTIModelConfig);
export const useVAEModels = (args?: ModelHookArgs) => buildModelsHook(isVAEModelConfig, args?.excludeSubmodels)();
export const useVAEModels = (args?: ModelHookArgs) =>
buildModelsHook(isVAEModelConfigOrSubmodel, args?.excludeSubmodels)();
export const useFluxVAEModels = (args?: ModelHookArgs) =>
buildModelsHook(isFluxVAEModelConfig, args?.excludeSubmodels)();
export const useCLIPVisionModels = buildModelsHook(isCLIPVisionModelConfig);
export const useSigLipModels = buildModelsHook(isSigLipModelConfig);
export const useFluxReduxModels = buildModelsHook(isFluxReduxModelConfig);
export const useGlobalReferenceImageModels = buildModelsHook(
(config) =>
isIPAdapterModelConfig(config) ||
@@ -88,7 +78,6 @@ export const useGlobalReferenceImageModels = buildModelsHook(
export const useRegionalReferenceImageModels = buildModelsHook(
(config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
);
export const useLLaVAModels = buildModelsHook(isLLaVAModelConfig);
export const useVideoModels = buildModelsHook(isVideoModelConfig);
const buildModelsSelector =
@@ -100,23 +89,7 @@ const buildModelsSelector =
}
return modelConfigsAdapterSelectors.selectAll(result.data).filter(typeGuard);
};
// export const selectSDMainModels = buildModelsSelector(isNonRefinerNonFluxMainModelConfig);
// export const selectMainModels = buildModelsSelector(isNonRefinerMainModelConfig);
// export const selectNonSDXLMainModels = buildModelsSelector(isNonSDXLMainModelConfig);
// export const selectRefinerModels = buildModelsSelector(isRefinerMainModelModelConfig);
// export const selectFluxModels = buildModelsSelector(isFluxMainModelModelConfig);
// export const selectSDXLModels = buildModelsSelector(isSDXLMainModelModelConfig);
// export const selectLoRAModels = buildModelsSelector(isLoRAModelConfig);
// export const selectControlNetAndT2IAdapterModels = buildModelsSelector(isControlNetOrT2IAdapterModelConfig);
// export const selectControlNetModels = buildModelsSelector(isControlNetModelConfig);
// export const selectT2IAdapterModels = buildModelsSelector(isT2IAdapterModelConfig);
// export const selectT5EncoderModels = buildModelsSelector(isT5EncoderModelConfig);
// export const selectClipEmbedModels = buildModelsSelector(isClipEmbedModelConfig);
// export const selectSpandrelImageToImageModels = buildModelsSelector(isSpandrelImageToImageModelConfig);
export const selectIPAdapterModels = buildModelsSelector(isIPAdapterModelConfig);
// export const selectEmbeddingModels = buildModelsSelector(isTIModelConfig);
// export const selectVAEModels = buildModelsSelector(isVAEModelConfig);
// export const selectFluxVAEModels = buildModelsSelector(isFluxVAEModelConfig);
export const selectGlobalRefImageModels = buildModelsSelector(
(config) =>
isIPAdapterModelConfig(config) ||
@@ -129,19 +102,3 @@ export const selectGlobalRefImageModels = buildModelsSelector(
export const selectRegionalRefImageModels = buildModelsSelector(
(config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config)
);
export const buildSelectModelConfig = <T extends AnyModelConfig>(
key: string,
typeGuard: (config: AnyModelConfig) => config is T
): Selector<RootState, T | null> =>
createSelector(selectModelConfigsQuery, (result) => {
if (!result.data) {
return null;
}
return (
modelConfigsAdapterSelectors
.selectAll(result.data)
.filter(typeGuard)
.find((m) => m.key === key) ?? null
);
});

View File

@@ -16908,7 +16908,7 @@ export type components = {
* @description Storage format of model.
* @enum {string}
*/
ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api";
ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown";
/** ModelIdentifierField */
ModelIdentifierField: {
/**
@@ -17206,7 +17206,7 @@ export type components = {
* Config Out
* @description After successful installation, this will hold the configuration object.
*/
config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"]) | null;
config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]) | null;
/**
* Inplace
* @description Leave model in its current location; otherwise install under models directory
@@ -17292,7 +17292,7 @@ export type components = {
* Config
* @description The model's config
*/
config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
/**
* @description The submodel type, if any
* @default null
@@ -17313,7 +17313,7 @@ export type components = {
* Config
* @description The model's config
*/
config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
/**
* @description The submodel type, if any
* @default null
@@ -17469,7 +17469,7 @@ export type components = {
* @description Model type.
* @enum {string}
*/
ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video";
ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown";
/**
* ModelVariantType
* @description Variant type.
@@ -17482,7 +17482,7 @@ export type components = {
*/
ModelsList: {
/** Models */
models: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"])[];
models: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"])[];
};
/**
* Multiply Integers
@@ -21929,6 +21929,86 @@ export type components = {
*/
token: string;
};
/** UnknownModelConfig */
UnknownModelConfig: {
/**
* Key
* @description A unique key for this model.
*/
key: string;
/**
* Hash
* @description The hash of the model file(s).
*/
hash: string;
/**
* Path
* @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
*/
path: string;
/**
* File Size
* @description The size of the model in bytes.
*/
file_size: number;
/**
* Name
* @description Name of the model.
*/
name: string;
/**
* Type
* @default unknown
* @constant
*/
type: "unknown";
/**
* Format
* @default unknown
* @constant
*/
format: "unknown";
/**
* Base
* @default any
* @constant
*/
base: "any";
/**
* Source
* @description The original source of the model (path, URL or repo_id).
*/
source: string;
/** @description The type of source */
source_type: components["schemas"]["ModelSourceType"];
/**
* Description
* @description Model description
*/
description?: string | null;
/**
* Source Api Response
* @description The original API response from the source, as stringified JSON.
*/
source_api_response?: string | null;
/**
* Cover Image
* @description Url for image to preview model
*/
cover_image?: string | null;
/**
* Submodels
* @description Loadable submodels in this model
*/
submodels?: {
[key: string]: components["schemas"]["SubmodelDefinition"];
} | null;
/**
* Usage Info
* @description Usage information for this model
*/
usage_info?: string | null;
};
/**
* Unsharp Mask
* @description Applies an unsharp mask filter to an image
@@ -22939,7 +23019,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Validation Error */
@@ -22989,7 +23069,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Bad request */
@@ -23094,7 +23174,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Bad request */
@@ -23608,7 +23688,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"];
"application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"];
};
};
/** @description Bad request */

View File

@@ -134,6 +134,7 @@ type SigLipModelConfig = S['SigLIPConfig'];
export type FLUXReduxModelConfig = S['FluxReduxConfig'];
type ApiModelConfig = S['ApiModelConfig'];
export type VideoApiModelConfig = S['VideoApiModelConfig'];
type UnknownModelConfig = S['UnknownModelConfig'];
export type MainModelConfig = DiffusersModelConfig | CheckpointModelConfig | ApiModelConfig;
export type FLUXKontextModelConfig = MainModelConfig;
export type ChatGPT4oModelConfig = ApiModelConfig;
@@ -155,7 +156,8 @@ export type AnyModelConfig =
| CLIPVisionDiffusersConfig
| SigLipModelConfig
| FLUXReduxModelConfig
| LlavaOnevisionConfig;
| LlavaOnevisionConfig
| UnknownModelConfig;
/**
* Checks if a list of submodels contains any that match a given variant or type
@@ -199,10 +201,17 @@ export const isControlLoRAModelConfig = (config: AnyModelConfig): config is Cont
return config.type === 'control_lora';
};
export const isVAEModelConfig = (config: AnyModelConfig, excludeSubmodels?: boolean): config is VAEModelConfig => {
/**
 * Type guard: the config is a standalone VAE model, or — unless `excludeSubmodels`
 * is set — a main model that bundles a loadable VAE submodel.
 */
export const isVAEModelConfigOrSubmodel = (
  config: AnyModelConfig,
  excludeSubmodels?: boolean
): config is VAEModelConfig => {
  if (config.type === 'vae') {
    return true;
  }
  // Fall back to main models carrying a VAE submodel, unless the caller opted out.
  return !excludeSubmodels && config.type === 'main' && checkSubmodels(['vae'], config);
};
/**
 * Type guard: the config is a standalone VAE model (submodels of main models are not considered).
 */
export const isVAEModelConfig = (config: AnyModelConfig): config is VAEModelConfig => {
  const { type } = config;
  return type === 'vae';
};
export const isNonFluxVAEModelConfig = (
config: AnyModelConfig,
excludeSubmodels?: boolean
@@ -246,7 +255,7 @@ export const isT2IAdapterModelConfig = (config: AnyModelConfig): config is T2IAd
return config.type === 't2i_adapter';
};
export const isT5EncoderModelConfig = (
export const isT5EncoderModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is T5EncoderModelConfig | T5EncoderBnbQuantizedLlmInt8bModelConfig => {
@@ -256,7 +265,13 @@ export const isT5EncoderModelConfig = (
);
};
export const isCLIPEmbedModelConfig = (
/**
 * Type guard: the config is a standalone T5 text encoder (plain or bnb-quantized variant).
 */
export const isT5EncoderModelConfig = (
  config: AnyModelConfig
): config is T5EncoderModelConfig | T5EncoderBnbQuantizedLlmInt8bModelConfig => {
  const { type } = config;
  return type === 't5_encoder';
};
export const isCLIPEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPEmbedModelConfig => {
@@ -266,7 +281,11 @@ export const isCLIPEmbedModelConfig = (
);
};
export const isCLIPLEmbedModelConfig = (
/**
 * Type guard: the config is a standalone CLIP embed model (submodels of main models are not considered).
 */
export const isCLIPEmbedModelConfig = (config: AnyModelConfig): config is CLIPEmbedModelConfig => {
  const { type } = config;
  return type === 'clip_embed';
};
export const isCLIPLEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPLEmbedModelConfig => {
@@ -276,7 +295,7 @@ export const isCLIPLEmbedModelConfig = (
);
};
export const isCLIPGEmbedModelConfig = (
export const isCLIPGEmbedModelConfigOrSubmodel = (
config: AnyModelConfig,
excludeSubmodels?: boolean
): config is CLIPGEmbedModelConfig => {
@@ -308,6 +327,10 @@ export const isVideoModelConfig = (config: AnyModelConfig): config is VideoApiMo
return config.type === 'video';
};
/**
 * Type guard: the config is an unidentified model (probing/classification could not determine its type).
 */
export const isUnknownModelConfig = (config: AnyModelConfig): config is UnknownModelConfig => {
  const { type } = config;
  return type === 'unknown';
};
/**
 * Type guard: the config is a main model on the `flux-kontext` (API-hosted) base.
 */
export const isFluxKontextApiModelConfig = (config: AnyModelConfig): config is ApiModelConfig => {
  const isMainModel = config.type === 'main';
  const isFluxKontextBase = config.base === 'flux-kontext';
  return isMainModel && isFluxKontextBase;
};

189
test.json Normal file
View File

@@ -0,0 +1,189 @@
{
"$defs": {
"ClipVariantType": {
"description": "Variant type.",
"enum": ["large", "gigantic"],
"title": "ClipVariantType",
"type": "string"
},
"ModelSourceType": {
"description": "Model source type.",
"enum": ["path", "url", "hf_repo_id"],
"title": "ModelSourceType",
"type": "string"
},
"ModelType": {
"description": "Model type.",
"enum": [
"onnx",
"main",
"vae",
"lora",
"control_lora",
"controlnet",
"embedding",
"ip_adapter",
"clip_vision",
"clip_embed",
"t2i_adapter",
"t5_encoder",
"spandrel_image_to_image",
"siglip",
"flux_redux",
"llava_onevision",
"video",
"unknown"
],
"title": "ModelType",
"type": "string"
},
"ModelVariantType": {
"description": "Variant type.",
"enum": ["normal", "inpaint", "depth"],
"title": "ModelVariantType",
"type": "string"
},
"SubModelType": {
"description": "Submodel type.",
"enum": [
"unet",
"transformer",
"text_encoder",
"text_encoder_2",
"text_encoder_3",
"tokenizer",
"tokenizer_2",
"tokenizer_3",
"vae",
"vae_decoder",
"vae_encoder",
"scheduler",
"safety_checker"
],
"title": "SubModelType",
"type": "string"
},
"SubmodelDefinition": {
"properties": {
"path_or_prefix": { "title": "Path Or Prefix", "type": "string" },
"model_type": { "$ref": "#/$defs/ModelType" },
"variant": {
"anyOf": [
{ "$ref": "#/$defs/ModelVariantType" },
{ "$ref": "#/$defs/ClipVariantType" },
{ "type": "null" }
],
"default": null,
"title": "Variant"
}
},
"required": ["path_or_prefix", "model_type"],
"title": "SubmodelDefinition",
"type": "object"
}
},
"properties": {
"key": {
"description": "A unique key for this model.",
"title": "Key",
"type": "string"
},
"hash": {
"description": "The hash of the model file(s).",
"title": "Hash",
"type": "string"
},
"path": {
"description": "Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.",
"title": "Path",
"type": "string"
},
"file_size": {
"description": "The size of the model in bytes.",
"title": "File Size",
"type": "integer"
},
"name": {
"description": "Name of the model.",
"title": "Name",
"type": "string"
},
"type": {
"const": "unknown",
"default": "unknown",
"title": "Type",
"type": "string"
},
"format": {
"const": "unknown",
"default": "unknown",
"title": "Format",
"type": "string"
},
"base": {
"const": "any",
"default": "any",
"title": "Base",
"type": "string"
},
"source": {
"description": "The original source of the model (path, URL or repo_id).",
"title": "Source",
"type": "string"
},
"source_type": {
"$ref": "#/$defs/ModelSourceType",
"description": "The type of source"
},
"description": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"default": null,
"description": "Model description",
"title": "Description"
},
"source_api_response": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"default": null,
"description": "The original API response from the source, as stringified JSON.",
"title": "Source Api Response"
},
"cover_image": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"default": null,
"description": "Url for image to preview model",
"title": "Cover Image"
},
"submodels": {
"anyOf": [
{
"additionalProperties": { "$ref": "#/$defs/SubmodelDefinition" },
"propertyNames": { "$ref": "#/$defs/SubModelType" },
"type": "object"
},
{ "type": "null" }
],
"default": null,
"description": "Loadable submodels in this model",
"title": "Submodels"
},
"usage_info": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"default": null,
"description": "Usage information for this model",
"title": "Usage Info"
}
},
"required": [
"hash",
"path",
"file_size",
"name",
"source",
"source_type",
"key",
"type",
"format"
],
"title": "UnknownModelConfig",
"type": "object"
}