Compare commits

...

3 Commits

Author  SHA1  Message  Date
psychedelicious  b7bcdce31a  feat(ui): save pre-downscaled image output (wip)  2024-10-28 18:38:31 +10:00
psychedelicious  455f2a35df  feat(ui): graph builders use objects for arg instead of many args  2024-10-28 17:36:29 +10:00
psychedelicious  c8950f5959  feat(ui): extract canvas output node prefix to constant  2024-10-28 17:36:01 +10:00
10 changed files with 270 additions and 179 deletions

View File

@@ -359,6 +359,7 @@ export type StagingAreaImage = {
imageDTO: ImageDTO;
offsetX: number;
offsetY: number;
preDownscaleImageDTO: ImageDTO | null;
};
const zAspectRatioID = z.enum(['Free', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
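The new preDownscaleImageDTO field appears intended to let a staged result carry the pre-downscale (processing-resolution) output alongside the image that was resized back to the bbox; the WIP event handler at the end of this diff still stages it as null. A minimal sketch of the shape, with simplified stand-in types and hypothetical image names and sizes (the real ImageDTO carries many more fields):

// Simplified stand-ins for illustration only; the real ImageDTO and
// StagingAreaImage live in the app's store/types and have more fields.
type ImageDTOLike = { image_name: string; width: number; height: number };

type StagingAreaImageLike = {
  imageDTO: ImageDTOLike;
  offsetX: number;
  offsetY: number;
  preDownscaleImageDTO: ImageDTOLike | null;
};

// Hypothetical staged entry: the displayed image was resized back to the bbox
// size, while the processing-resolution decode is kept alongside it.
const staged: StagingAreaImageLike = {
  imageDTO: { image_name: 'canvas_scaled_output.png', width: 512, height: 512 },
  offsetX: 0,
  offsetY: 0,
  preDownscaleImageDTO: { image_name: 'canvas_unscaled_output.png', width: 1024, height: 1024 },
};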

View File

@@ -2,22 +2,36 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { CanvasState, Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addImageToImage = async (
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasState['bbox'],
denoising_start: number,
fp32: boolean
): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'>> => {
type AddImageToImageArg = {
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
bbox: CanvasState['bbox'];
denoising_start: number;
fp32: boolean;
};
export const addImageToImage = async ({
g,
manager,
l2i,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
denoising_start,
fp32,
}: AddImageToImageArg): Promise<CanvasOutputs> => {
denoise.denoising_start = denoising_start;
const { image_name } = await manager.compositor.getCompositeRasterLayerImageDTO(bbox.rect);
@@ -44,13 +58,12 @@ export const addImageToImage = async (
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
// This is the new output node
return resizeImageToOriginalSize;
return { scaled: resizeImageToOriginalSize, unscaled: l2i };
} else {
// No need to resize, just decode
const i2l = addImageToLatents(g, l2i.type === 'flux_vae_decode', fp32, image_name);
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
return l2i;
return { scaled: l2i, unscaled: l2i };
}
};
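For context, a sketch of the call-site change this enables. The surrounding variables (g, manager, l2i, denoise, vaeSource, originalSize, scaledSize, bbox) are assumed to be in scope as they are in the graph builders later in this diff, and the strength value is illustrative:

// Before: positional arguments, where two Dimensions and two trailing scalars
// are easy to transpose, and only a single output node could be returned.
//   const canvasOutput = await addImageToImage(
//     g, manager, l2i, denoise, vaeSource, originalSize, scaledSize, bbox, 1 - 0.75, false
//   );

// After: one named-property object, and both output nodes come back.
const { scaled, unscaled } = await addImageToImage({
  g,
  manager,
  l2i,
  denoise,
  vaeSource,
  originalSize,
  scaledSize,
  bbox,
  denoising_start: 1 - 0.75, // builders derive this from img2imgStrength
  fp32: false,
});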

View File

@@ -6,23 +6,38 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addInpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddInpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addInpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddInpaintArg): Promise<CanvasOutputs> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -55,16 +70,6 @@ export const addInpaint = async (
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: getPrefixedId('create_gradient_mask'),
type: 'create_gradient_mask',
@@ -78,6 +83,11 @@ export const addInpaint = async (
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into the gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
@@ -94,21 +104,20 @@ export const addInpaint = async (
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return { unscaled: canvasPasteBack, scaled: resizeOutput };
} else {
// No scale before processing, much simpler
const i2l = addImageToLatents(g, modelLoader.type === 'flux_model_loader', fp32, initialImage.image_name);
@@ -152,6 +161,6 @@ export const addInpaint = async (
canvasPasteBack.source_image = { image_name: initialImage.image_name };
}
return canvasPasteBack;
return { unscaled: canvasPasteBack, scaled: canvasPasteBack };
}
};

View File

@@ -6,23 +6,38 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import { addImageToLatents, getInfill } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addOutpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddOutpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addOutpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddOutpaintArg): Promise<CanvasOutputs> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -98,40 +113,33 @@ export const addOutpaint = async (
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
// Resize the output image back to the original size
const resizeOutputImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeOutputMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const canvasPasteBack = g.addNode({
id: getPrefixedId('canvas_v2_mask_and_crop'),
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into the gradient mask
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeInputImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return { unscaled: canvasPasteBack, scaled: resizeOutput };
} else {
infill.image = { image_name: initialImage.image_name };
// No scale before processing, much simpler
@@ -186,6 +194,6 @@ export const addOutpaint = async (
canvasPasteBack.source_image = { image_name: initialImage.image_name };
}
return canvasPasteBack;
return { unscaled: canvasPasteBack, scaled: canvasPasteBack };
}
};

View File

@@ -1,15 +1,18 @@
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addTextToImage = (
g: Graph,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
originalSize: Dimensions,
scaledSize: Dimensions
): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'> => {
type AddTextToImageArg = {
g: Graph;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
export const addTextToImage = ({ g, l2i, originalSize, scaledSize }: AddTextToImageArg): CanvasOutputs => {
if (!isEqual(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({
@@ -19,8 +22,8 @@ export const addTextToImage = (
});
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
return resizeImageToOriginalSize;
return { scaled: resizeImageToOriginalSize, unscaled: l2i };
} else {
return l2i;
return { scaled: l2i, unscaled: l2i };
}
};

View File

@@ -14,9 +14,16 @@ import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_SCALED_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addControlNets } from './addControlAdapters';
@@ -74,7 +81,7 @@ export const buildFLUXGraph = async (
prompt: positivePrompt,
});
const noise = g.addNode({
const denoise = g.addNode({
type: 'flux_denoise',
id: getPrefixedId('flux_denoise'),
guidance,
@@ -91,23 +98,19 @@ export const buildFLUXGraph = async (
id: getPrefixedId('flux_vae_decode'),
});
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'transformer', noise, 'transformer');
g.addEdge(modelLoader, 'vae', noise, 'controlnet_vae');
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'vae', denoise, 'controlnet_vae');
g.addEdge(modelLoader, 'vae', l2i, 'vae');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', posCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', posCond, 't5_max_seq_len');
addFLUXLoRAs(state, g, noise, modelLoader, posCond);
addFLUXLoRAs(state, g, denoise, modelLoader, posCond);
g.addEdge(posCond, 'conditioning', noise, 'positive_text_conditioning');
g.addEdge(posCond, 'conditioning', denoise, 'positive_text_conditioning');
g.addEdge(noise, 'latents', l2i, 'latents');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'flux');
@@ -126,59 +129,63 @@ export const buildFLUXGraph = async (
clip_embed_model: clipEmbedModel,
});
let denoisingStart: number;
let denoising_start: number;
if (optimizedDenoisingEnabled) {
// We rescale the img2imgStrength (with exponent 0.2) to effectively use the entire range [0, 1] and make the scale
// more user-friendly for FLUX. Without this, most of the 'change' is concentrated in the high denoise strength
// range (>0.9).
denoisingStart = 1 - img2imgStrength ** 0.2;
denoising_start = 1 - img2imgStrength ** 0.2;
} else {
denoisingStart = 1 - img2imgStrength;
denoising_start = 1 - img2imgStrength;
}
let canvasOutputs: CanvasOutputs;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutputs = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutputs = await addImageToImage({
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutputs = await addInpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutputs = await addOutpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -194,7 +201,7 @@ export const buildFLUXGraph = async (
modelConfig.base
);
if (controlNetResult.addedControlNets > 0) {
g.addEdge(controlNetCollector, 'collection', noise, 'control');
g.addEdge(controlNetCollector, 'collection', denoise, 'control');
} else {
g.deleteNode(controlNetCollector.id);
}
@@ -221,24 +228,24 @@ export const buildFLUXGraph = async (
g.addEdge(modelLoader, 'clip', negCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', negCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', negCond, 't5_max_seq_len');
g.addEdge(negCond, 'conditioning', noise, 'negative_text_conditioning');
g.addEdge(negCond, 'conditioning', denoise, 'negative_text_conditioning');
g.updateNode(noise, {
g.updateNode(denoise, {
cfg_scale: 3,
cfg_scale_start_step,
cfg_scale_end_step,
});
g.addEdge(ipAdapterCollector, 'collection', noise, 'ip_adapter');
g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
} else {
g.deleteNode(ipAdapterCollector.id);
}
if (state.system.shouldUseNSFWChecker) {
canvasOutput = addNSFWChecker(g, canvasOutput);
canvasOutputs.scaled = addNSFWChecker(g, canvasOutputs.scaled);
}
if (state.system.shouldUseWatermarker) {
canvasOutput = addWatermarker(g, canvasOutput);
canvasOutputs.scaled = addWatermarker(g, canvasOutputs.scaled);
}
// This image will be staged, should not be saved to the gallery or added to a board.
@@ -249,13 +256,13 @@ export const buildFLUXGraph = async (
g.upsertMetadata(selectCanvasMetadata(state));
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
g.updateNode(canvasOutputs.scaled, {
id: getPrefixedId(CANVAS_SCALED_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,
});
g.setMetadataReceivingNode(canvasOutput);
return { g, noise, posCond };
g.setMetadataReceivingNode(canvasOutputs.scaled);
return { g, noise: denoise, posCond };
};
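A quick worked illustration of the optimized-denoising mapping above (1 - strength ** 0.2) against the linear 1 - strength; the sample strengths are arbitrary:

// Optimized FLUX mapping: denoising_start = 1 - strength ** 0.2.
// Compared with the linear 1 - strength, it starts denoising much earlier at
// low and mid strengths, so perceptible change is spread across the whole
// slider instead of being concentrated above ~0.9.
const linear = (strength: number) => 1 - strength;
const optimized = (strength: number) => 1 - strength ** 0.2;

for (const s of [0.1, 0.5, 0.9]) {
  console.log(`strength=${s}  linear=${linear(s).toFixed(3)}  optimized=${optimized(s).toFixed(3)}`);
}
// strength=0.1  linear=0.900  optimized=0.369
// strength=0.5  linear=0.500  optimized=0.129
// strength=0.9  linear=0.100  optimized=0.021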

View File

@@ -18,9 +18,16 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_SCALED_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -120,10 +127,6 @@ export const buildSD1Graph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', clipSkip, 'clip');
g.addEdge(clipSkip, 'clip', posCond, 'clip');
@@ -165,10 +168,14 @@ export const buildSD1Graph = async (
> = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
const denoising_start = 1 - params.img2imgStrength;
let canvasOutputs: CanvasOutputs;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutputs = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutputs = await addImageToImage({
g,
manager,
l2i,
@@ -177,11 +184,11 @@ export const buildSD1Graph = async (
originalSize,
scaledSize,
bbox,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutputs = await addInpaint({
state,
g,
manager,
@@ -191,11 +198,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutputs = await addOutpaint({
state,
g,
manager,
@@ -205,9 +212,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -275,11 +284,11 @@ export const buildSD1Graph = async (
}
if (state.system.shouldUseNSFWChecker) {
canvasOutput = addNSFWChecker(g, canvasOutput);
canvasOutputs.scaled = addNSFWChecker(g, canvasOutputs.scaled);
}
if (state.system.shouldUseWatermarker) {
canvasOutput = addWatermarker(g, canvasOutput);
canvasOutputs.scaled = addWatermarker(g, canvasOutputs.scaled);
}
// This image will be staged, should not be saved to the gallery or added to a board.
@@ -290,13 +299,13 @@ export const buildSD1Graph = async (
g.upsertMetadata(selectCanvasMetadata(state));
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
g.updateNode(canvasOutputs.scaled, {
id: getPrefixedId(CANVAS_SCALED_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,
});
g.setMetadataReceivingNode(canvasOutput);
g.setMetadataReceivingNode(canvasOutputs.scaled);
return { g, noise, posCond };
};

View File

@@ -18,9 +18,17 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import type { CanvasOutputs } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_SCALED_OUTPUT_PREFIX,
CANVAS_UNSCALED_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -118,10 +126,6 @@ export const buildSDXLGraph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 'clip', negCond, 'clip');
@@ -168,10 +172,16 @@ export const buildSDXLGraph = async (
await addSDXLRefiner(state, g, denoise, seamless, posCond, negCond, l2i);
}
const denoising_start = refinerModel
? Math.min(refinerStart, 1 - params.img2imgStrength)
: 1 - params.img2imgStrength;
let canvasOutputs: CanvasOutputs;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutputs = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutputs = await addImageToImage({
g,
manager,
l2i,
@@ -180,11 +190,11 @@ export const buildSDXLGraph = async (
originalSize,
scaledSize,
bbox,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutputs = await addInpaint({
state,
g,
manager,
@@ -194,11 +204,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutputs = await addOutpaint({
state,
g,
manager,
@@ -208,9 +218,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -278,11 +290,11 @@ export const buildSDXLGraph = async (
}
if (state.system.shouldUseNSFWChecker) {
canvasOutput = addNSFWChecker(g, canvasOutput);
canvasOutputs.scaled = addNSFWChecker(g, canvasOutputs.scaled);
}
if (state.system.shouldUseWatermarker) {
canvasOutput = addWatermarker(g, canvasOutput);
canvasOutputs.scaled = addWatermarker(g, canvasOutputs.scaled);
}
// This image will be staged, should not be saved to the gallery or added to a board.
@@ -293,13 +305,18 @@ export const buildSDXLGraph = async (
g.upsertMetadata(selectCanvasMetadata(state));
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
g.updateNode(canvasOutputs.scaled, {
id: getPrefixedId(CANVAS_SCALED_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,
});
g.setMetadataReceivingNode(canvasOutput);
g.updateNode(canvasOutputs.unscaled, {
id: getPrefixedId(CANVAS_UNSCALED_OUTPUT_PREFIX),
board,
});
g.setMetadataReceivingNode(canvasOutputs.scaled);
return { g, noise, posCond };
};

View File

@@ -129,3 +129,13 @@ export const addImageToLatents = (g: Graph, isFlux: boolean, fp32: boolean, imag
return g.addNode({ id: 'i2l', type: 'i2l', fp32, image: image_name ? { image_name } : undefined });
}
};
export const CANVAS_SCALED_OUTPUT_PREFIX = 'canvas_scaled_output';
export const CANVAS_UNSCALED_OUTPUT_PREFIX = 'canvas_unscaled_output';
export type CanvasOutputs = {
scaled: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
>;
unscaled: Invocation<'l2i' | 'flux_vae_decode' | 'canvas_v2_mask_and_crop'>;
};
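A condensed sketch of how the builders above consume the pair: the scaled node remains the user-facing output (safety checks, board routing, metadata), while the unscaled node is tagged with its own prefix so its full-resolution image can be recovered later. Variables such as g, l2i, originalSize, scaledSize, is_intermediate, and board are assumed to be in scope as in buildSDXLGraph:

// Condensed from the builders in this diff.
const canvasOutputs = addTextToImage({ g, l2i, originalSize, scaledSize });

// The scaled node is the result the user sees and the metadata receiver.
g.updateNode(canvasOutputs.scaled, {
  id: getPrefixedId(CANVAS_SCALED_OUTPUT_PREFIX),
  is_intermediate,
  use_cache: false,
  board,
});
// The unscaled node is tagged separately so the socket listener can recognize it.
g.updateNode(canvasOutputs.unscaled, {
  id: getPrefixedId(CANVAS_UNSCALED_OUTPUT_PREFIX),
  board,
});
g.setMetadataReceivingNode(canvasOutputs.scaled);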

View File

@@ -6,6 +6,10 @@ import { stagingAreaImageStaged } from 'features/controlLayers/store/canvasStagi
import { boardIdSelected, galleryViewChanged, imageSelected, offsetChanged } from 'features/gallery/store/gallerySlice';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
import { zNodeStatus } from 'features/nodes/types/invocation';
import {
CANVAS_SCALED_OUTPUT_PREFIX,
CANVAS_UNSCALED_OUTPUT_PREFIX,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { boardsApi } from 'services/api/endpoints/boards';
import { getImageDTOSafe, imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO, S } from 'services/api/types';
@@ -15,7 +19,11 @@ import { $lastProgressEvent } from 'services/events/stores';
const log = logger('events');
const isCanvasOutputNode = (data: S['InvocationCompleteEvent']) => {
return data.invocation_source_id.split(':')[0] === 'canvas_output';
return data.invocation_source_id.split(':')[0] === CANVAS_SCALED_OUTPUT_PREFIX;
};
const isCanvasUnscaledOutputNode = (data: S['InvocationCompleteEvent']) => {
return data.invocation_source_id.split(':')[0] === CANVAS_UNSCALED_OUTPUT_PREFIX;
};
const nodeTypeDenylist = ['load_image', 'image'];
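The predicates above rely on node IDs having the form prefix:suffix, which is what the split(':')[0] comparison implies about getPrefixedId. A small self-contained sketch of that matching, with a simplified local prefixer standing in for the real helper:

// Simplified stand-in for getPrefixedId, assuming the prefix:suffix shape
// implied by the split(':')[0] checks (the real helper lives in
// features/controlLayers/konva/util).
const prefixId = (prefix: string) => `${prefix}:${crypto.randomUUID()}`;

const SCALED = 'canvas_scaled_output';
const UNSCALED = 'canvas_unscaled_output';

const isScaledOutput = (sourceId: string) => sourceId.split(':')[0] === SCALED;
const isUnscaledOutput = (sourceId: string) => sourceId.split(':')[0] === UNSCALED;

const id = prefixId(UNSCALED); // e.g. "canvas_unscaled_output:4f9c..."
console.log(isScaledOutput(id));   // false
console.log(isUnscaledOutput(id)); // true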
@@ -124,9 +132,15 @@ export const buildOnInvocationComplete = (getState: () => RootState, dispatch: A
// TODO(psyche): Can/should we let canvas handle this itself?
if (isCanvasOutputNode(data)) {
if (data.result.type === 'image_output') {
dispatch(stagingAreaImageStaged({ stagingAreaImage: { imageDTO, offsetX: 0, offsetY: 0 } }));
dispatch(
stagingAreaImageStaged({
stagingAreaImage: { imageDTO, offsetX: 0, offsetY: 0, preDownscaleImageDTO: null },
})
);
}
addImageToGallery(data, imageDTO);
} else if (isCanvasUnscaledOutputNode(data)) {
console.log(data.result);
}
} else if (!imageDTO.is_intermediate) {
// Destination is gallery