build graph for cogview4

This commit is contained in:
Mary Hipp
2025-03-17 14:57:50 -04:00
committed by psychedelicious
parent 305c5761d0
commit 9846229e52
17 changed files with 295 additions and 99 deletions

View File

@@ -25,7 +25,7 @@ class BaseModelType(str, Enum):
StableDiffusionXL = "sdxl"
StableDiffusionXLRefiner = "sdxl-refiner"
Flux = "flux"
CogView4 = " cogview4"
CogView4 = "cogview4"
# Kandinsky2_1 = "kandinsky-2.1"

View File

@@ -6,6 +6,7 @@ import { withResult, withResultAsync } from 'common/util/result';
import { parseify } from 'common/util/serialize';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
@@ -45,6 +46,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
return await buildSD3Graph(state, manager);
case `flux`:
return await buildFLUXGraph(state, manager);
case 'cogview4':
return await buildCogView4Graph(state, manager);
default:
assert(false, `No graph builders for base ${base}`);
}

View File

@@ -5,12 +5,14 @@ import { range } from 'lodash-es';
import type { components } from 'services/api/schema';
import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types';
import type { ConditioningNodes, NoiseNodes } from './types';
export const prepareLinearUIBatch = (
state: RootState,
g: Graph,
prepend: boolean,
noise: Invocation<'noise' | 'flux_denoise' | 'sd3_denoise'>,
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'sd3_text_encoder'>,
noise: Invocation<NoiseNodes>,
posCond: Invocation<ConditioningNodes>,
origin: 'canvas' | 'workflows' | 'upscaling',
destination: 'canvas' | 'gallery'
): EnqueueBatchArg => {

View File

@@ -2,18 +2,22 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { CanvasState, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type {
DenoiseLatentsNodes,
LatentToImageNodes,
MainModelLoaderNodes,
VaeSourceNodes,
} from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
type AddImageToImageArg = {
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode' | 'sd3_l2i'>;
i2lNodeType: 'i2l' | 'flux_vae_encode' | 'sd3_i2l';
denoise: Invocation<'denoise_latents' | 'flux_denoise' | 'sd3_denoise'>;
vaeSource: Invocation<
'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader' | 'sd3_model_loader'
>;
l2i: Invocation<LatentToImageNodes>;
i2lNodeType: 'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l';
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
bbox: CanvasState['bbox'];
@@ -33,7 +37,7 @@ export const addImageToImage = async ({
bbox,
denoising_start,
fp32,
}: AddImageToImageArg): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i'>> => {
}: AddImageToImageArg): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i'>> => {
denoise.denoising_start = denoising_start;
const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, bbox.rect, {

View File

@@ -6,6 +6,13 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils';
import type {
DenoiseLatentsNodes,
LatentToImageNodes,
MainModelLoaderNodes,
VaeSourceNodes,
} from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
@@ -13,13 +20,11 @@ type AddInpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode' | 'sd3_l2i'>;
i2lNodeType: 'i2l' | 'flux_vae_encode' | 'sd3_i2l';
denoise: Invocation<'denoise_latents' | 'flux_denoise' | 'sd3_denoise'>;
vaeSource: Invocation<
'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader' | 'sd3_model_loader'
>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'sd3_model_loader'>;
l2i: Invocation<LatentToImageNodes>;
i2lNodeType: 'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l';
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
modelLoader: Invocation<MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
@@ -119,7 +124,7 @@ export const addInpaint = async ({
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
if (modelLoader.type !== 'flux_model_loader' && modelLoader.type !== 'sd3_model_loader') {
if (!isMainModelWithoutUnet(modelLoader)) {
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
}
g.addEdge(resizeImageToScaledSize, 'image', createGradientMask, 'image');
@@ -184,7 +189,7 @@ export const addInpaint = async ({
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
if (modelLoader.type !== 'flux_model_loader' && modelLoader.type !== 'sd3_model_loader') {
if (!isMainModelWithoutUnet(modelLoader)) {
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
}
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');

View File

@@ -1,5 +1,6 @@
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import type { Invocation } from 'services/api/types';
/**
@@ -8,19 +9,7 @@ import type { Invocation } from 'services/api/types';
* @param imageOutput The current image output node
* @returns The nsfw checker node
*/
export const addNSFWChecker = (
g: Graph,
imageOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
>
): Invocation<'img_nsfw'> => {
export const addNSFWChecker = (g: Graph, imageOutput: Invocation<ImageOutputNodes>): Invocation<'img_nsfw'> => {
const nsfw = g.addNode({
type: 'img_nsfw',
id: getPrefixedId('nsfw_checker'),

View File

@@ -6,7 +6,14 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getInfill } from 'features/nodes/util/graph/graphBuilderUtils';
import { getInfill, isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils';
import type {
DenoiseLatentsNodes,
ImageToLatentsNodes,
LatentToImageNodes,
MainModelLoaderNodes,
VaeSourceNodes,
} from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
@@ -14,13 +21,11 @@ type AddOutpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode' | 'sd3_l2i'>;
i2lNodeType: 'i2l' | 'flux_vae_encode' | 'sd3_i2l';
denoise: Invocation<'denoise_latents' | 'flux_denoise' | 'sd3_denoise'>;
vaeSource: Invocation<
'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader' | 'sd3_model_loader'
>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'sd3_model_loader'>;
l2i: Invocation<LatentToImageNodes>;
i2lNodeType: ImageToLatentsNodes;
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
modelLoader: Invocation<MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
@@ -116,7 +121,7 @@ export const addOutpaint = async ({
g.addEdge(infill, 'image', createGradientMask, 'image');
g.addEdge(resizeInputMaskToScaledSize, 'image', createGradientMask, 'mask');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
if (modelLoader.type !== 'flux_model_loader' && modelLoader.type !== 'sd3_model_loader') {
if (!isMainModelWithoutUnet(modelLoader)) {
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
}
@@ -216,7 +221,7 @@ export const addOutpaint = async ({
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
if (modelLoader.type !== 'flux_model_loader' && modelLoader.type !== 'sd3_model_loader') {
if (!isMainModelWithoutUnet(modelLoader)) {
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
}

View File

@@ -1,12 +1,13 @@
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { LatentToImageNodes } from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
type AddTextToImageArg = {
g: Graph;
l2i: Invocation<'l2i' | 'flux_vae_decode' | 'sd3_l2i'>;
l2i: Invocation<LatentToImageNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
@@ -16,7 +17,7 @@ export const addTextToImage = ({
l2i,
originalSize,
scaledSize,
}: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i'> => {
}: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i'> => {
if (!isEqual(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({

View File

@@ -1,5 +1,6 @@
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import type { Invocation } from 'services/api/types';
/**
@@ -8,19 +9,7 @@ import type { Invocation } from 'services/api/types';
* @param imageOutput The image output node
* @returns The watermark node
*/
export const addWatermarker = (
g: Graph,
imageOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
>
): Invocation<'img_watermark'> => {
export const addWatermarker = (g: Graph, imageOutput: Invocation<ImageOutputNodes>): Invocation<'img_watermark'> => {
const watermark = g.addNode({
type: 'img_watermark',
id: getPrefixedId('watermarker'),

View File

@@ -0,0 +1,187 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
const log = logger('system');
/**
 * Builds the canvas generation graph for a CogView4 model.
 *
 * Assembles the core nodes (model loader, positive/negative GLM text encoders,
 * denoise, latents-to-image decode), wires them together, then delegates to the
 * generation-mode helpers (text-to-image / image-to-image / inpaint / outpaint)
 * to produce the final canvas output node.
 *
 * @param state The Redux root state (params, canvas settings and canvas slices are read).
 * @param manager The canvas manager, used to determine the generation mode and to composite images.
 * @returns The graph, the denoise node (exposed as `noise` for batch preparation —
 *   see `NoiseNodes`, which includes 'cogview4_denoise'), and the positive conditioning node.
 */
export const buildCogView4Graph = async (
  state: RootState,
  manager: CanvasManager
): Promise<{ g: Graph; noise: Invocation<'cogview4_denoise'>; posCond: Invocation<'cogview4_text_encoder'> }> => {
  const generationMode = await manager.compositor.getGenerationMode();
  log.debug({ generationMode }, 'Building CogView4 graph');

  const params = selectParamsSlice(state);
  const canvasSettings = selectCanvasSettingsSlice(state);
  const canvas = selectCanvasSlice(state);

  const { bbox } = canvas;
  const { model, cfgScale: cfg_scale, seed, steps } = params;

  assert(model, 'No model found in state');

  const { originalSize, scaledSize } = getSizes(bbox);
  const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);

  const g = new Graph(getPrefixedId('cogview4_graph'));

  // Core nodes: model loader, positive/negative text encoders, denoise, latents decode.
  const modelLoader = g.addNode({
    type: 'cogview4_model_loader',
    id: getPrefixedId('cogview4_model_loader'),
    model,
  });
  const posCond = g.addNode({
    type: 'cogview4_text_encoder',
    id: getPrefixedId('pos_prompt'),
    prompt: positivePrompt,
  });
  const negCond = g.addNode({
    type: 'cogview4_text_encoder',
    id: getPrefixedId('neg_prompt'),
    prompt: negativePrompt,
  });
  const denoise = g.addNode({
    type: 'cogview4_denoise',
    id: getPrefixedId('denoise_latents'),
    cfg_scale,
    width: scaledSize.width,
    height: scaledSize.height,
    steps,
    // Full denoising range by default; the img2img/inpaint/outpaint helpers
    // below overwrite denoising_start for partial denoising.
    denoising_start: 0,
    denoising_end: 1,
  });
  const l2i = g.addNode({
    type: 'cogview4_l2i',
    id: getPrefixedId('l2i'),
  });

  // Wire the core txt2img pipeline. Both encoders share the model loader's GLM encoder.
  g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
  g.addEdge(modelLoader, 'glm_encoder', posCond, 'glm_encoder');
  g.addEdge(modelLoader, 'glm_encoder', negCond, 'glm_encoder');
  g.addEdge(modelLoader, 'vae', l2i, 'vae');
  g.addEdge(posCond, 'conditioning', denoise, 'positive_conditioning');
  g.addEdge(negCond, 'conditioning', denoise, 'negative_conditioning');
  g.addEdge(denoise, 'latents', l2i, 'latents');

  const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
  assert(modelConfig.base === 'cogview4');

  g.upsertMetadata({
    generation_mode: 'cogview4_txt2img',
    cfg_scale,
    width: originalSize.width,
    height: originalSize.height,
    positive_prompt: positivePrompt,
    negative_prompt: negativePrompt,
    model: Graph.getModelMetadataField(modelConfig),
    seed,
    steps,
  });

  // Higher img2imgStrength means a lower denoising_start, i.e. more of the
  // source image is repainted.
  const denoising_start = 1 - params.img2imgStrength;

  let canvasOutput: Invocation<ImageOutputNodes> = l2i;

  if (generationMode === 'txt2img') {
    canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
  } else if (generationMode === 'img2img') {
    canvasOutput = await addImageToImage({
      g,
      manager,
      l2i,
      i2lNodeType: 'cogview4_i2l',
      denoise,
      // CogView4's model loader doubles as the VAE source; there is no separate VAE loader here.
      vaeSource: modelLoader,
      originalSize,
      scaledSize,
      bbox,
      denoising_start,
      fp32: false,
    });
  } else if (generationMode === 'inpaint') {
    canvasOutput = await addInpaint({
      state,
      g,
      manager,
      l2i,
      i2lNodeType: 'cogview4_i2l',
      denoise,
      vaeSource: modelLoader,
      modelLoader,
      originalSize,
      scaledSize,
      denoising_start,
      fp32: false,
    });
  } else if (generationMode === 'outpaint') {
    canvasOutput = await addOutpaint({
      state,
      g,
      manager,
      l2i,
      i2lNodeType: 'cogview4_i2l',
      denoise,
      vaeSource: modelLoader,
      modelLoader,
      originalSize,
      scaledSize,
      denoising_start,
      fp32: false,
    });
  } else {
    // Compile-time exhaustiveness check: fails to typecheck if a new generation mode is added.
    assert<Equals<typeof generationMode, never>>(false);
  }

  // Optional post-processing stages appended after the image output.
  if (state.system.shouldUseNSFWChecker) {
    canvasOutput = addNSFWChecker(g, canvasOutput);
  }
  if (state.system.shouldUseWatermarker) {
    canvasOutput = addWatermarker(g, canvasOutput);
  }

  // This image will be staged, should not be saved to the gallery or added to a board.
  const is_intermediate = canvasSettings.sendToCanvas;
  const board = canvasSettings.sendToCanvas ? undefined : getBoardField(state);

  if (!canvasSettings.sendToCanvas) {
    g.upsertMetadata(selectCanvasMetadata(state));
  }

  g.updateNode(canvasOutput, {
    id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
    is_intermediate,
    use_cache: false,
    board,
  });

  g.setMetadataReceivingNode(canvasOutput);

  // The denoise node is returned under the `noise` key so downstream batch
  // preparation (prepareLinearUIBatch) can target it — presumably for
  // per-iteration seed handling; see the NoiseNodes type.
  return { g, noise: denoise, posCond };
};

View File

@@ -22,6 +22,7 @@ import {
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Invocation } from 'services/api/types';
@@ -165,16 +166,7 @@ export const buildFLUXGraph = async (
denoising_start = 1 - img2imgStrength;
}
let canvasOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
> = l2i;
let canvasOutput: Invocation<ImageOutputNodes> = l2i;
if (isFLUXFill) {
canvasOutput = await addFLUXFill({

View File

@@ -23,6 +23,7 @@ import {
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
@@ -167,16 +168,7 @@ export const buildSD1Graph = async (
const denoising_start = 1 - params.img2imgStrength;
let canvasOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
> = l2i;
let canvasOutput: Invocation<ImageOutputNodes> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });

View File

@@ -18,6 +18,7 @@ import {
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
@@ -131,16 +132,7 @@ export const buildSD3Graph = async (
denoising_start = 1 - img2imgStrength;
}
let canvasOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
> = l2i;
let canvasOutput: Invocation<ImageOutputNodes> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });

View File

@@ -23,6 +23,7 @@ import {
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
@@ -173,16 +174,7 @@ export const buildSDXLGraph = async (
? Math.min(refinerStart, 1 - params.img2imgStrength)
: 1 - params.img2imgStrength;
let canvasOutput: Invocation<
| 'l2i'
| 'img_nsfw'
| 'img_watermark'
| 'img_resize'
| 'invokeai_img_blend'
| 'apply_mask_to_image'
| 'flux_vae_decode'
| 'sd3_l2i'
> = l2i;
let canvasOutput: Invocation<ImageOutputNodes> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });

View File

@@ -9,6 +9,8 @@ import { stylePresetsApi } from 'services/api/endpoints/stylePresets';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';
import type { MainModelLoaderNodes } from './types';
/**
* Gets the board field, based on the autoAddBoardId setting.
*/
@@ -119,3 +121,11 @@ export const getInfill = (
};
export const CANVAS_OUTPUT_PREFIX = 'canvas_output';
/**
 * Returns true when the given main-model loader belongs to an architecture
 * that has no UNet output (FLUX, SD3, CogView4), so callers know not to wire
 * a `unet` edge from it.
 */
export const isMainModelWithoutUnet = (modelLoader: Invocation<MainModelLoaderNodes>) => {
  // Loader node types for architectures that do not expose a UNet.
  const loadersWithoutUnet: string[] = ['flux_model_loader', 'sd3_model_loader', 'cogview4_model_loader'];
  return loadersWithoutUnet.includes(modelLoader.type);
};

View File

@@ -0,0 +1,33 @@
/** Node types that may produce the final output image of a generation graph. */
export type ImageOutputNodes =
  | 'l2i'
  | 'img_nsfw'
  | 'img_watermark'
  | 'img_resize'
  | 'invokeai_img_blend'
  | 'apply_mask_to_image'
  | 'flux_vae_decode'
  | 'sd3_l2i'
  | 'cogview4_l2i';

/** Latents-to-image (VAE decode) node types, one per base model architecture. */
export type LatentToImageNodes = 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i';

/** Image-to-latents (VAE encode) node types, one per base model architecture. */
export type ImageToLatentsNodes = 'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l';

/** Denoising node types, one per base model architecture. */
export type DenoiseLatentsNodes = 'denoise_latents' | 'flux_denoise' | 'sd3_denoise' | 'cogview4_denoise';

/** Main model loader node types across the supported base architectures. */
export type MainModelLoaderNodes =
  | 'main_model_loader'
  | 'sdxl_model_loader'
  | 'flux_model_loader'
  | 'sd3_model_loader'
  | 'cogview4_model_loader';

/** Node types that may sit between a model loader and a VAE consumer. */
export type VaeSourceNodes = 'seamless' | 'vae_loader';

/**
 * Node types prepareLinearUIBatch targets as the graph's "noise" node.
 * FLUX/SD3/CogView4 have no dedicated noise node, so their denoise node is used.
 */
export type NoiseNodes = 'noise' | 'flux_denoise' | 'sd3_denoise' | 'cogview4_denoise';

/** Prompt-conditioning (text encoder) node types, one per base model architecture. */
export type ConditioningNodes =
  | 'compel'
  | 'sdxl_compel_prompt'
  | 'flux_text_encoder'
  | 'sd3_text_encoder'
  | 'cogview4_text_encoder';

View File

@@ -1970,7 +1970,7 @@ export type components = {
* @description Base model type.
* @enum {string}
*/
BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | " cogview4";
BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4";
/** Batch */
Batch: {
/**
@@ -5119,7 +5119,7 @@ export type components = {
* @description The generation mode that output this image
* @default null
*/
generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint") | null;
generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint") | null;
/**
* Positive Prompt
* @description The positive prompt parameter