refactor(ui): simplify graph builders (WIP)

psychedelicious
2025-07-07 17:14:35 +10:00
parent e3c1334b1f
commit 9945c20d02
16 changed files with 250 additions and 252 deletions

View File

@@ -59,13 +59,11 @@ export const useDynamicPromptsWatcher = () => {
return;
}
const { positivePrompt } = presetModifiedPrompts;
// Before we execute, imperatively check the dynamic prompts query cache to see if we have already fetched this prompt
const state = getState();
const cachedPrompts = utilitiesApi.endpoints.dynamicPrompts.select({
prompt: positivePrompt,
prompt: presetModifiedPrompts.positive,
max_prompts: maxPrompts,
})(state).data;
@@ -77,8 +75,8 @@ export const useDynamicPromptsWatcher = () => {
}
// If the prompt is not in the cache, check if we should process it - this is just looking for dynamic prompts syntax
if (!getShouldProcessPrompt(positivePrompt)) {
dispatch(promptsChanged([positivePrompt]));
if (!getShouldProcessPrompt(presetModifiedPrompts.positive)) {
dispatch(promptsChanged([presetModifiedPrompts.positive]));
dispatch(parsingErrorChanged(undefined));
dispatch(isErrorChanged(false));
return;
@@ -89,6 +87,6 @@ export const useDynamicPromptsWatcher = () => {
dispatch(isLoadingChanged(true));
}
debouncedUpdateDynamicPrompts(positivePrompt, maxPrompts);
debouncedUpdateDynamicPrompts(presetModifiedPrompts.positive, maxPrompts);
}, [debouncedUpdateDynamicPrompts, dispatch, dynamicPrompting, getState, maxPrompts, presetModifiedPrompts]);
};
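
As an aside, the cache check above uses RTK Query's generated endpoint selectors to read the store imperatively rather than through a hook. A minimal TypeScript sketch of that pattern, using the names visible in this hunk (the import path for `utilitiesApi` is an assumption; only the identifier appears in the diff):

import type { RootState } from 'app/store/store';
// Assumed path; the diff only shows the `utilitiesApi` identifier.
import { utilitiesApi } from 'services/api/endpoints/utilities';

// Returns the cached dynamic-prompts result for this exact query arg, or
// undefined if this prompt/max_prompts pair has never been fetched.
const readCachedDynamicPrompts = (state: RootState, prompt: string, maxPrompts: number) => {
  const selectCacheEntry = utilitiesApi.endpoints.dynamicPrompts.select({
    prompt,
    max_prompts: maxPrompts,
  });
  return selectCacheEntry(state).data;
};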

View File

@@ -4,10 +4,11 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getDenoisingStartAndEnd } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getDenoisingStartAndEnd,
getOriginalAndScaledSizesForOtherModes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
type AddFLUXFillArg = {
@@ -16,8 +17,6 @@ type AddFLUXFillArg = {
manager: CanvasManager;
l2i: Invocation<'flux_vae_decode'>;
denoise: Invocation<'flux_denoise'>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
export const addFLUXFill = async ({
@@ -26,8 +25,6 @@ export const addFLUXFill = async ({
manager,
l2i,
denoise,
originalSize,
scaledSize,
}: AddFLUXFillArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state);
denoise.denoising_start = denoising_start;
@@ -35,18 +32,17 @@ export const addFLUXFill = async ({
const params = selectParamsSlice(state);
const canvasSettings = selectCanvasSettingsSlice(state);
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const { originalSize, scaledSize, rect } = getOriginalAndScaledSizesForOtherModes(state);
const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, bbox.rect, {
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {
is_intermediate: true,
silent: true,
});
const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, rect, {
is_intermediate: true,
silent: true,
});

View File

@@ -2,9 +2,11 @@ import { objectEquals } from '@observ33r/object-equals';
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { CanvasState, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getDenoisingStartAndEnd } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getDenoisingStartAndEnd,
getOriginalAndScaledSizesForOtherModes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type {
DenoiseLatentsNodes,
LatentToImageNodes,
@@ -21,9 +23,6 @@ type AddImageToImageArg = {
i2l: Invocation<'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l'>;
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
bbox: CanvasState['bbox'];
};
export const addImageToImage = async ({
@@ -34,16 +33,15 @@ export const addImageToImage = async ({
i2l,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
}: AddImageToImageArg): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i'>> => {
const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state);
denoise.denoising_start = denoising_start;
denoise.denoising_end = denoising_end;
const { originalSize, scaledSize, rect } = getOriginalAndScaledSizesForOtherModes(state);
const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, bbox.rect, {
const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, rect, {
is_intermediate: true,
silent: true,
});

View File

@@ -4,10 +4,12 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getDenoisingStartAndEnd, isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getDenoisingStartAndEnd,
getOriginalAndScaledSizesForOtherModes,
isMainModelWithoutUnet,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type {
DenoiseLatentsNodes,
LatentToImageNodes,
@@ -25,8 +27,6 @@ type AddInpaintArg = {
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
modelLoader: Invocation<MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
seed: Invocation<'integer'>;
};
@@ -39,8 +39,6 @@ export const addInpaint = async ({
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
}: AddInpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state);
@@ -49,9 +47,8 @@ export const addInpaint = async ({
const params = selectParamsSlice(state);
const canvasSettings = selectCanvasSettingsSlice(state);
const canvas = selectCanvasSlice(state);
const { rect } = canvas.bbox;
const { originalSize, scaledSize, rect } = getOriginalAndScaledSizesForOtherModes(state);
const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {

View File

@@ -4,12 +4,11 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
getDenoisingStartAndEnd,
getInfill,
getOriginalAndScaledSizesForOtherModes,
isMainModelWithoutUnet,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type {
@@ -22,30 +21,26 @@ import type {
import type { ImageDTO, Invocation } from 'services/api/types';
type AddOutpaintArg = {
state: RootState;
g: Graph;
state: RootState;
manager: CanvasManager;
l2i: Invocation<LatentToImageNodes>;
i2l: Invocation<ImageToLatentsNodes>;
denoise: Invocation<DenoiseLatentsNodes>;
vaeSource: Invocation<VaeSourceNodes | MainModelLoaderNodes>;
modelLoader: Invocation<MainModelLoaderNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
seed: Invocation<'integer'>;
};
export const addOutpaint = async ({
state,
g,
state,
manager,
l2i,
i2l,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
}: AddOutpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state);
@@ -54,20 +49,17 @@ export const addOutpaint = async ({
const params = selectParamsSlice(state);
const canvasSettings = selectCanvasSettingsSlice(state);
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const { originalSize, scaledSize, rect } = getOriginalAndScaledSizesForOtherModes(state);
const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, bbox.rect, {
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {
is_intermediate: true,
silent: true,
});
const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
const { rect } = canvas.bbox;
// Get inpaint mask adapters that have noise settings
const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);

View File

@@ -1,28 +1,47 @@
import { objectEquals } from '@observ33r/object-equals';
import type { RootState } from 'app/store/store';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getOriginalAndScaledSizesForTextToImage } from 'features/nodes/util/graph/graphBuilderUtils';
import type { DenoiseLatentsNodes, LatentToImageNodes } from 'features/nodes/util/graph/types';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';
type AddTextToImageArg = {
g: Graph;
state: RootState;
noise?: Invocation<'noise'>;
denoise: Invocation<DenoiseLatentsNodes>;
l2i: Invocation<LatentToImageNodes>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
export const addTextToImage = ({
g,
state,
noise,
denoise,
l2i,
originalSize,
scaledSize,
}: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i'> => {
denoise.denoising_start = 0;
denoise.denoising_end = 1;
const { originalSize, scaledSize } = getOriginalAndScaledSizesForTextToImage(state);
if (denoise.type === 'cogview4_denoise' || denoise.type === 'flux_denoise' || denoise.type === 'sd3_denoise') {
denoise.width = scaledSize.width;
denoise.height = scaledSize.height;
} else {
assert(denoise.type === 'denoise_latents');
assert(noise, 'SD1.5/SD2/SDXL graphs require a noise node to be passed in');
noise.width = scaledSize.width;
noise.height = scaledSize.height;
}
g.upsertMetadata({
width: originalSize.width,
height: originalSize.height,
});
if (!objectEquals(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({
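
Since the hunk above is cut off mid-statement, the shape of the change is worth spelling out: addTextToImage now takes `state`, derives `originalSize` and `scaledSize` via getOriginalAndScaledSizesForTextToImage, and only appends a resize node (presumably the 'img_resize' in its return type) when the two sizes differ. A minimal TypeScript sketch of that size check; the Dimensions shape is inferred from how the sizes are used in these files:

import { objectEquals } from '@observ33r/object-equals';

type Dimensions = { width: number; height: number };

// Generation runs at scaledSize; the decoded image is resized back to
// originalSize only when the two differ, otherwise the l2i output is used as-is.
const shouldResizeBackToOriginal = (originalSize: Dimensions, scaledSize: Dimensions): boolean =>
  !objectEquals(scaledSize, originalSize);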

View File

@@ -2,13 +2,19 @@ import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasMetadata } from 'features/controlLayers/store/selectors';
import { isChatGPT4oAspectRatioID, isChatGPT4oReferenceImageConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import { type ImageField, zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields, selectOriginalAndScaledSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getOriginalAndScaledSizesForOtherModes,
getOriginalAndScaledSizesForTextToImage,
selectCanvasOutputFields,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { t } from 'i18next';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -28,8 +34,6 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise<GraphBu
const refImages = selectRefImagesSlice(state);
const { originalSize, scaledSize, aspectRatio } = selectOriginalAndScaledSizes(state);
assert(model, 'No model selected');
assert(model.base === 'chatgpt-4o', 'Selected model is not a ChatGPT 4o API model');
@@ -52,6 +56,7 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise<GraphBu
}
if (generationMode === 'txt2img') {
const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state);
assert(isChatGPT4oAspectRatioID(aspectRatio.id), 'ChatGPT 4o does not support this aspect ratio');
const g = new Graph(getPrefixedId('chatgpt_4o_txt2img_graph'));
@@ -85,12 +90,13 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise<GraphBu
g,
positivePrompt,
};
}
} else if (generationMode === 'img2img') {
const { aspectRatio, rect } = getOriginalAndScaledSizesForOtherModes(state);
assert(isChatGPT4oAspectRatioID(aspectRatio.id), 'ChatGPT 4o does not support this aspect ratio');
if (generationMode === 'img2img') {
assert(manager !== null);
const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, bbox.rect, {
const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, rect, {
is_intermediate: true,
silent: true,
});
@@ -103,7 +109,7 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise<GraphBu
// @ts-expect-error: These nodes are not available in the OSS application
type: 'chatgpt_4o_edit_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: bbox.aspectRatio.id,
aspect_ratio: aspectRatio.id,
base_image: { image_name },
reference_images,
...selectCanvasOutputFields(state),
@@ -119,9 +125,14 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise<GraphBu
g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');
g.upsertMetadata({
model: Graph.getModelMetadataField(model),
width: bbox.rect.width,
height: bbox.rect.height,
width: rect.width,
height: rect.height,
});
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
return {
g,
positivePrompt,

View File

@@ -1,7 +1,7 @@
import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig, selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectCanvasMetadata } from 'features/controlLayers/store/selectors';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
@@ -10,12 +10,9 @@ import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
selectCanvasOutputFields,
selectOriginalAndScaledSizes,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
@@ -33,13 +30,9 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
assert(model.base === 'cogview4', 'Selected model is not a CogView4 model');
const params = selectParamsSlice(state);
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const { cfgScale: cfg_scale, steps } = params;
const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state);
const prompts = selectPresetModifiedPrompts(state);
const g = new Graph(getPrefixedId('cogview4_graph'));
@@ -73,18 +66,12 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
type: 'cogview4_denoise',
id: getPrefixedId('denoise_latents'),
cfg_scale,
width: scaledSize.width,
height: scaledSize.height,
steps,
});
const l2i = g.addNode({
type: 'cogview4_l2i',
id: getPrefixedId('l2i'),
});
const i2l = g.addNode({
type: 'cogview4_i2l',
id: getPrefixedId('cogview4_i2l'),
});
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'glm_encoder', posCond, 'glm_encoder');
@@ -104,8 +91,6 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
g.upsertMetadata({
cfg_scale,
width: originalSize.width,
height: originalSize.height,
negative_prompt: prompts.negative,
model: Graph.getModelMetadataField(modelConfig),
steps,
@@ -118,14 +103,18 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({
g,
state,
denoise,
l2i,
originalSize,
scaledSize,
});
g.upsertMetadata({ generation_mode: 'cogview4_txt2img' });
} else if (generationMode === 'img2img') {
assert(manager !== null);
const i2l = g.addNode({
type: 'cogview4_i2l',
id: getPrefixedId('cogview4_i2l'),
});
canvasOutput = await addImageToImage({
g,
state,
@@ -134,13 +123,15 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
l2i,
i2l,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
});
g.upsertMetadata({ generation_mode: 'cogview4_img2img' });
} else if (generationMode === 'inpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'cogview4_i2l',
id: getPrefixedId('cogview4_i2l'),
});
canvasOutput = await addInpaint({
g,
state,
@@ -150,13 +141,16 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'cogview4_inpaint' });
} else if (generationMode === 'outpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'cogview4_i2l',
id: getPrefixedId('cogview4_i2l'),
});
canvasOutput = await addOutpaint({
g,
state,
@@ -166,8 +160,6 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'cogview4_outpaint' });
@@ -183,10 +175,12 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise<GraphBui
canvasOutput = addWatermarker(g, canvasOutput);
}
g.upsertMetadata(selectCanvasMetadata(state));
g.updateNode(canvasOutput, selectCanvasOutputFields(state));
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
g.setMetadataReceivingNode(canvasOutput);
return {

View File

@@ -16,9 +16,10 @@ import { addRegions } from 'features/nodes/util/graph/generation/addRegions';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields, selectOriginalAndScaledSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import { selectCanvasOutputFields } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { t } from 'i18next';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
@@ -41,10 +42,6 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
const canvas = selectCanvasSlice(state);
const refImages = selectRefImagesSlice(state);
const { bbox } = canvas;
const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state);
const { guidance: baseGuidance, steps, fluxVAE, t5EncoderModel, clipEmbedModel } = params;
assert(t5EncoderModel, 'No T5 Encoder model found in state');
@@ -118,18 +115,12 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
id: getPrefixedId('flux_denoise'),
guidance,
num_steps: steps,
width: scaledSize.width,
height: scaledSize.height,
});
const l2i = g.addNode({
type: 'flux_vae_decode',
id: getPrefixedId('flux_vae_decode'),
});
const i2l = g.addNode({
type: 'flux_vae_encode',
id: getPrefixedId('flux_vae_encode'),
});
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'vae', denoise, 'controlnet_vae');
@@ -149,8 +140,6 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
g.upsertMetadata({
guidance,
width: originalSize.width,
height: originalSize.height,
model: Graph.getModelMetadataField(model),
steps,
vae: fluxVAE,
@@ -195,20 +184,21 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
manager,
l2i,
denoise,
originalSize,
scaledSize,
});
} else if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({
g,
state,
denoise,
l2i,
originalSize,
scaledSize,
});
g.upsertMetadata({ generation_mode: 'flux_txt2img' });
} else if (generationMode === 'img2img') {
assert(manager !== null);
const i2l = g.addNode({
type: 'flux_vae_encode',
id: getPrefixedId('flux_vae_encode'),
});
canvasOutput = await addImageToImage({
g,
state,
@@ -217,13 +207,14 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
i2l,
denoise,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
});
g.upsertMetadata({ generation_mode: 'flux_img2img' });
} else if (generationMode === 'inpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'flux_vae_encode',
id: getPrefixedId('flux_vae_encode'),
});
canvasOutput = await addInpaint({
g,
state,
@@ -233,13 +224,15 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'flux_inpaint' });
} else if (generationMode === 'outpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'flux_vae_encode',
id: getPrefixedId('flux_vae_encode'),
});
canvasOutput = await addOutpaint({
g,
state,
@@ -249,8 +242,6 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'flux_outpaint' });
@@ -353,10 +344,12 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
canvasOutput = addWatermarker(g, canvasOutput);
}
g.upsertMetadata(selectCanvasMetadata(state));
g.updateNode(canvasOutput, selectCanvasOutputFields(state));
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
g.setMetadataReceivingNode(canvasOutput);
return {

View File

@@ -2,13 +2,15 @@ import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
import { isFluxKontextAspectRatioID, isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import type { ImageField } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getOriginalAndScaledSizesForTextToImage,
selectCanvasOutputFields,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
@@ -29,10 +31,10 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn
log.debug({ generationMode, manager: manager?.id }, 'Building FLUX Kontext graph');
const canvas = selectCanvasSlice(state);
const refImages = selectRefImagesSlice(state);
const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state);
assert(isFluxKontextAspectRatioID(aspectRatio.id), 'FLUX Kontext does not support this aspect ratio');
const { bbox } = canvas;
const refImages = selectRefImagesSlice(state);
const validRefImages = refImages.entities
.filter((entity) => entity.isEnabled)
@@ -60,7 +62,7 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn
// @ts-expect-error: These nodes are not available in the OSS application
type: input_image ? 'flux_kontext_edit_image' : 'flux_kontext_generate_image',
model: zModelIdentifierField.parse(model),
aspect_ratio: bbox.aspectRatio.id,
aspect_ratio: aspectRatio.id,
input_image,
prompt_upsampling: true,
...selectCanvasOutputFields(state),
@@ -76,8 +78,8 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn
g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');
g.upsertMetadata({
model: Graph.getModelMetadataField(model),
width: bbox.rect.width,
height: bbox.rect.height,
width: originalSize.width,
height: originalSize.height,
});
return {
g,

View File

@@ -1,11 +1,14 @@
import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isImagenAspectRatioID } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getOriginalAndScaledSizesForTextToImage,
selectCanvasOutputFields,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
@@ -26,13 +29,12 @@ export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen3' }));
}
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const prompts = selectPresetModifiedPrompts(state);
assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen3 does not support this aspect ratio');
assert(prompts.positive.length > 0, 'Imagen3 requires positive prompt to have at least one character');
const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state);
assert(isImagenAspectRatioID(aspectRatio.id), 'Imagen3 does not support this aspect ratio');
const g = new Graph(getPrefixedId('imagen3_txt2img_graph'));
const positivePrompt = g.addNode({
id: getPrefixedId('positive_prompt'),
@@ -43,7 +45,7 @@ export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
type: 'google_imagen3_generate_image',
model: zModelIdentifierField.parse(model),
negative_prompt: prompts.negative,
aspect_ratio: bbox.aspectRatio.id,
aspect_ratio: aspectRatio.id,
// When enhance_prompt is true, Imagen3 will return a new image every time, ignoring the seed.
enhance_prompt: true,
...selectCanvasOutputFields(state),
@@ -60,10 +62,9 @@ export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
g.upsertMetadata({
negative_prompt: prompts.negative,
width: bbox.rect.width,
height: bbox.rect.height,
width: originalSize.width,
height: originalSize.height,
model: Graph.getModelMetadataField(model),
...selectCanvasMetadata(state),
});
return {

View File

@@ -1,11 +1,14 @@
import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isImagenAspectRatioID } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import {
getOriginalAndScaledSizesForTextToImage,
selectCanvasOutputFields,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
@@ -25,14 +28,12 @@ export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen4' }));
}
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const prompts = selectPresetModifiedPrompts(state);
assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen4 does not support this aspect ratio');
assert(prompts.positive.length > 0, 'Imagen4 requires positive prompt to have at least one character');
const { originalSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state);
assert(isImagenAspectRatioID(aspectRatio.id), 'Imagen4 does not support this aspect ratio');
const g = new Graph(getPrefixedId('imagen4_txt2img_graph'));
const positivePrompt = g.addNode({
id: getPrefixedId('positive_prompt'),
@@ -43,7 +44,7 @@ export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
type: 'google_imagen4_generate_image',
model: zModelIdentifierField.parse(model),
negative_prompt: prompts.negative,
aspect_ratio: bbox.aspectRatio.id,
aspect_ratio: aspectRatio.id,
// When enhance_prompt is true, Imagen4 will return a new image every time, ignoring the seed.
enhance_prompt: true,
...selectCanvasOutputFields(state),
@@ -60,10 +61,9 @@ export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => {
g.upsertMetadata({
negative_prompt: prompts.negative,
width: bbox.rect.width,
height: bbox.rect.height,
width: originalSize.width,
height: originalSize.height,
model: Graph.getModelMetadataField(model),
...selectCanvasMetadata(state),
});
return {

View File

@@ -15,12 +15,9 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
selectCanvasOutputFields,
selectOriginalAndScaledSizes,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -42,8 +39,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
const canvas = selectCanvasSlice(state);
const refImages = selectRefImagesSlice(state);
const { bbox } = canvas;
const {
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
@@ -57,7 +52,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
const fp32 = vaePrecision === 'fp32';
const prompts = selectPresetModifiedPrompts(state);
const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state);
const g = new Graph(getPrefixedId('sd1_graph'));
const seed = g.addNode({
@@ -98,8 +92,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
const noise = g.addNode({
type: 'noise',
id: getPrefixedId('noise'),
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
@@ -117,11 +109,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
id: getPrefixedId('l2i'),
fp32,
});
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
@@ -150,8 +137,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
g.upsertMetadata({
cfg_scale,
cfg_rescale_multiplier,
width: originalSize.width,
height: originalSize.height,
negative_prompt: prompts.negative,
model: Graph.getModelMetadataField(model),
steps,
@@ -178,14 +163,19 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({
g,
state,
noise,
denoise,
l2i,
originalSize,
scaledSize,
});
g.upsertMetadata({ generation_mode: 'txt2img' });
} else if (generationMode === 'img2img') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addImageToImage({
g,
state,
@@ -194,13 +184,15 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
i2l,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
});
g.upsertMetadata({ generation_mode: 'img2img' });
} else if (generationMode === 'inpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addInpaint({
g,
state,
@@ -210,13 +202,16 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'inpaint' });
} else if (generationMode === 'outpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addOutpaint({
g,
state,
@@ -226,8 +221,6 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'outpaint' });
@@ -317,10 +310,12 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
canvasOutput = addWatermarker(g, canvasOutput);
}
g.upsertMetadata(selectCanvasMetadata(state));
g.updateNode(canvasOutput, selectCanvasOutputFields(state));
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
g.setMetadataReceivingNode(canvasOutput);
return {

View File

@@ -1,7 +1,7 @@
import { logger } from 'app/logging/logger';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectMainModelConfig, selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasMetadata, selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectCanvasMetadata } from 'features/controlLayers/store/selectors';
import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
@@ -9,12 +9,9 @@ import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
selectCanvasOutputFields,
selectOriginalAndScaledSizes,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -31,13 +28,9 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
assert(model.base === 'sd-3');
const params = selectParamsSlice(state);
const canvas = selectCanvasSlice(state);
const { bbox } = canvas;
const { cfgScale: cfg_scale, steps, vae, t5EncoderModel, clipLEmbedModel, clipGEmbedModel } = params;
const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state);
const prompts = selectPresetModifiedPrompts(state);
const g = new Graph(getPrefixedId('sd3_graph'));
@@ -78,17 +71,11 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
steps,
denoising_start: 0,
denoising_end: 1,
width: scaledSize.width,
height: scaledSize.height,
});
const l2i = g.addNode({
type: 'sd3_l2i',
id: getPrefixedId('l2i'),
});
const i2l = g.addNode({
type: 'sd3_i2l',
id: getPrefixedId('sd3_i2l'),
});
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'clip_l', posCond, 'clip_l');
@@ -108,8 +95,6 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
g.upsertMetadata({
cfg_scale,
width: originalSize.width,
height: originalSize.height,
negative_prompt: prompts.negative,
model: Graph.getModelMetadataField(model),
steps,
@@ -123,14 +108,17 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({
g,
state,
denoise,
l2i,
originalSize,
scaledSize,
});
g.upsertMetadata({ generation_mode: 'sd3_txt2img' });
} else if (generationMode === 'img2img') {
assert(manager !== null);
const i2l = g.addNode({
type: 'sd3_i2l',
id: getPrefixedId('sd3_i2l'),
});
canvasOutput = await addImageToImage({
g,
state,
@@ -139,13 +127,14 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
i2l,
denoise,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
});
g.upsertMetadata({ generation_mode: 'sd3_img2img' });
} else if (generationMode === 'inpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'sd3_i2l',
id: getPrefixedId('sd3_i2l'),
});
canvasOutput = await addInpaint({
g,
state,
@@ -155,13 +144,15 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'sd3_inpaint' });
} else if (generationMode === 'outpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'sd3_i2l',
id: getPrefixedId('sd3_i2l'),
});
canvasOutput = await addOutpaint({
g,
state,
@@ -171,8 +162,6 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'sd3_outpaint' });
@@ -188,10 +177,12 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise<GraphBuilderR
canvasOutput = addWatermarker(g, canvasOutput);
}
g.upsertMetadata(selectCanvasMetadata(state));
g.updateNode(canvasOutput, selectCanvasOutputFields(state));
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
g.setMetadataReceivingNode(canvasOutput);
return {
g,

View File

@@ -15,12 +15,9 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
selectCanvasOutputFields,
selectOriginalAndScaledSizes,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { Invocation } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -42,8 +39,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
const canvas = selectCanvasSlice(state);
const refImages = selectRefImagesSlice(state);
const { bbox } = canvas;
const {
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
@@ -56,7 +51,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
} = params;
const fp32 = vaePrecision === 'fp32';
const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state);
const prompts = selectPresetModifiedPrompts(state);
const g = new Graph(getPrefixedId('sdxl_graph'));
@@ -98,8 +92,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
const noise = g.addNode({
type: 'noise',
id: getPrefixedId('noise'),
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
@@ -115,11 +107,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
id: getPrefixedId('l2i'),
fp32,
});
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
@@ -149,8 +136,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
g.upsertMetadata({
cfg_scale,
cfg_rescale_multiplier,
width: originalSize.width,
height: originalSize.height,
model: Graph.getModelMetadataField(model),
steps,
rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
@@ -188,14 +173,19 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage({
g,
state,
noise,
denoise,
l2i,
originalSize,
scaledSize,
});
g.upsertMetadata({ generation_mode: 'sdxl_txt2img' });
} else if (generationMode === 'img2img') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addImageToImage({
g,
state,
@@ -204,13 +194,15 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
i2l,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
});
g.upsertMetadata({ generation_mode: 'sdxl_img2img' });
} else if (generationMode === 'inpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addInpaint({
g,
state,
@@ -220,13 +212,16 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'sdxl_inpaint' });
} else if (generationMode === 'outpaint') {
assert(manager !== null);
const i2l = g.addNode({
type: 'i2l',
id: getPrefixedId('i2l'),
fp32,
});
canvasOutput = await addOutpaint({
g,
state,
@@ -236,8 +231,6 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
seed,
});
g.upsertMetadata({ generation_mode: 'sdxl_outpaint' });
@@ -326,10 +319,12 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
canvasOutput = addWatermarker(g, canvasOutput);
}
g.upsertMetadata(selectCanvasMetadata(state));
g.updateNode(canvasOutput, selectCanvasOutputFields(state));
if (selectActiveTab(state) === 'canvas') {
g.upsertMetadata(selectCanvasMetadata(state));
}
g.setMetadataReceivingNode(canvasOutput);
return {
g,

View File

@@ -103,27 +103,43 @@ export const selectPresetModifiedPrompts = createSelector(
}
);
export const selectOriginalAndScaledSizes = createSelector(
[selectActiveTab, selectParamsSlice, selectCanvasSlice],
(tab, params, canvas) => {
if (tab === 'generate') {
const { width, height } = params.dimensions.rect;
const { aspectRatio } = params.dimensions;
return {
originalSize: { width, height },
scaledSize: { width, height },
aspectRatio,
};
} else {
// tab === 'canvas'
const { width, height } = canvas.bbox.rect;
const { aspectRatio } = canvas.bbox;
const originalSize = { width, height };
const scaledSize = ['auto', 'manual'].includes(canvas.bbox.scaleMethod) ? canvas.bbox.scaledSize : originalSize;
return { originalSize, scaledSize, aspectRatio };
}
export const getOriginalAndScaledSizesForTextToImage = (state: RootState) => {
const tab = selectActiveTab(state);
const params = selectParamsSlice(state);
const canvas = selectCanvasSlice(state);
if (tab === 'canvas') {
const { rect, aspectRatio } = canvas.bbox;
const { width, height } = rect;
const originalSize = { width, height };
const scaledSize = ['auto', 'manual'].includes(canvas.bbox.scaleMethod) ? canvas.bbox.scaledSize : originalSize;
return { originalSize, scaledSize, aspectRatio };
} else if (tab === 'generate') {
const { rect, aspectRatio } = params.dimensions;
const { width, height } = rect;
return {
originalSize: { width, height },
scaledSize: { width, height },
aspectRatio,
};
}
);
assert(false, `Cannot get sizes for tab ${tab} - this function is only for the Canvas or Generate tabs`);
};
export const getOriginalAndScaledSizesForOtherModes = (state: RootState) => {
const tab = selectActiveTab(state);
const canvas = selectCanvasSlice(state);
assert(tab === 'canvas', `Cannot get sizes for tab ${tab} - this function is only for the Canvas tab`);
const { rect, aspectRatio } = canvas.bbox;
const { width, height } = rect;
const originalSize = { width, height };
const scaledSize = ['auto', 'manual'].includes(canvas.bbox.scaleMethod) ? canvas.bbox.scaledSize : originalSize;
return { originalSize, scaledSize, aspectRatio, rect };
};
export const getInfill = (
g: Graph,
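
Closing note on the utilities above: the old selectOriginalAndScaledSizes selector is split into two mode-specific helpers, and the builders in this commit consume them roughly as follows. This is a TypeScript sketch assembled from the call sites shown above, not a complete builder; the wrapper function names are illustrative only.

import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import {
  getOriginalAndScaledSizesForOtherModes,
  getOriginalAndScaledSizesForTextToImage,
} from 'features/nodes/util/graph/graphBuilderUtils';

// txt2img: valid on both the generate and canvas tabs; scaledSize only differs
// from originalSize when the canvas bbox scale method is 'auto' or 'manual'.
const getTextToImageSizes = (state: RootState) => {
  const { originalSize, scaledSize, aspectRatio } = getOriginalAndScaledSizesForTextToImage(state);
  return { originalSize, scaledSize, aspectRatio };
};

// img2img / inpaint / outpaint: canvas tab only; the returned rect is the bbox
// rect that builders now pass to the compositor when rasterizing visible layers.
const compositeRasterLayers = async (state: RootState, manager: CanvasManager) => {
  const { rect } = getOriginalAndScaledSizesForOtherModes(state);
  const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
  return manager.compositor.getCompositeImageDTO(adapters, rect, {
    is_intermediate: true,
    silent: true,
  });
};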