refactor(ui): use new compositing nodes for inpaint/outpaint graphs
@@ -6,7 +6,7 @@ import { getControlLayerWarnings } from 'features/controlLayers/store/validators
 import type { Graph } from 'features/nodes/util/graph/generation/Graph';
 import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
 import { serializeError } from 'serialize-error';
-import type { ImageDTO, Invocation } from 'services/api/types';
+import type { ImageDTO, Invocation, MainModelConfig } from 'services/api/types';
 import { assert } from 'tsafe';

 const log = logger('system');
@@ -114,7 +114,7 @@ type AddControlLoRAArg = {
   entities: CanvasControlLayerState[];
   g: Graph;
   rect: Rect;
-  model: ParameterModel;
+  model: MainModelConfig;
   denoise: Invocation<'flux_denoise'>;
 };

@@ -129,9 +129,9 @@ export const addControlLoRA = async ({ manager, entities, g, rect, model, denois
     // No valid control LoRA found
     return;
   }
-  if (validControlLayers.length > 1) {
-    throw new Error('Cannot add more than one FLUX control LoRA.');
-  }
+  assert(model.variant !== 'inpaint', 'FLUX Control LoRA is not compatible with FLUX Fill.');
+  assert(validControlLayers.length <= 1, 'Cannot add more than one FLUX control LoRA.');

   const getImageDTOResult = await withResultAsync(() => {
     const adapter = manager.adapters.controlLayers.get(validControlLayer.id);
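The guard style above relies on tsafe's assert, which throws at runtime and also narrows the asserted condition for the compiler, so the explicit if/throw block is no longer needed. A minimal illustrative sketch of that pattern (pickSingle is a hypothetical helper, not part of this commit):

import { assert } from 'tsafe';

// assert() throws with the given message when the condition is false and
// narrows the condition for TypeScript, replacing manual if/throw guards.
const pickSingle = <T>(items: T[], label: string): T | undefined => {
  assert(items.length <= 1, `Cannot add more than one ${label}.`);
  return items[0];
};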
@@ -39,7 +39,7 @@ export const addInpaint = async ({
   scaledSize,
   denoising_start,
   fp32,
-}: AddInpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
+}: AddInpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
   denoise.denoising_start = denoising_start;

   const params = selectParamsSlice(state);
@@ -104,10 +104,10 @@ export const addInpaint = async ({
       edge_radius: params.canvasCoherenceEdgeSize,
       fp32,
     });
-    const canvasPasteBack = g.addNode({
-      id: getPrefixedId('canvas_v2_mask_and_crop'),
-      type: 'canvas_v2_mask_and_crop',
-      mask_blur: params.maskBlur,
+    const expandMask = g.addNode({
+      type: 'expand_mask_with_fade',
+      id: getPrefixedId('expand_mask_with_fade'),
+      fade_size_px: params.maskBlur,
     });

     // Resize initial image and mask to scaled size, feed into to gradient mask
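Here expand_mask_with_fade takes over the role of canvas_v2_mask_and_crop's mask_blur: the same params.maskBlur value now drives fade_size_px, and the denoise mask is expanded and edge-faded before compositing rather than blurred at paste-back time. As a rough mental model only (assumed semantics, not the node's actual implementation), the fade can be pictured as a linear alpha ramp outside the original mask:

// distancePx: how far a pixel lies outside the original mask edge (0 = on the edge).
// Returns a mask alpha in [0, 1]: fully inside stays 1, beyond the fade band drops to 0.
const fadeAlpha = (distancePx: number, fadeSizePx: number): number =>
  fadeSizePx <= 0
    ? (distancePx > 0 ? 0 : 1)
    : Math.max(0, Math.min(1, 1 - distancePx / fadeSizePx));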
@@ -128,18 +128,31 @@ export const addInpaint = async ({
     // After denoising, resize the image and mask back to original size
     g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
-    g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
-
-    // Finally, paste the generated masked image back onto the original image
-    g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
-    g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
+    g.addEdge(createGradientMask, 'expanded_mask_area', expandMask, 'mask');

+    // Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
+    // to canvas but not outputting only masked regions
     if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
-      canvasPasteBack.source_image = { image_name: initialImage.image_name };
+      const imageLayerBlend = g.addNode({
+        type: 'invokeai_img_blend',
+        id: getPrefixedId('image_layer_blend'),
+        layer_base: { image_name: initialImage.image_name },
+      });
+      g.addEdge(resizeImageToOriginalSize, 'image', imageLayerBlend, 'layer_upper');
+      g.addEdge(resizeMaskToOriginalSize, 'image', imageLayerBlend, 'mask');
+      return imageLayerBlend;
+    } else {
+      // Otherwise, just apply the mask
+      const applyMaskToImage = g.addNode({
+        type: 'apply_mask_to_image',
+        id: getPrefixedId('apply_mask_to_image'),
+        invert_mask: true,
+      });
+      g.addEdge(resizeMaskToOriginalSize, 'image', applyMaskToImage, 'mask');
+      g.addEdge(resizeImageToOriginalSize, 'image', applyMaskToImage, 'image');
+      return applyMaskToImage;
     }
-
-    return canvasPasteBack;
   } else {
     // No scale before processing, much simpler
     const i2l = g.addNode({
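The single canvas_v2_mask_and_crop paste-back is thus replaced by an explicit choice between two compositing nodes, driven by the same canvas settings as before. A self-contained sketch of that decision (chooseCompositeMode is a hypothetical name used only for illustration):

type CompositeMode = 'blend_over_original' | 'masked_region_only';

// Blend the denoised result over the original when the output goes to the gallery, or when it
// goes to canvas without the "output only masked regions" setting; otherwise keep only the
// masked region, with the faded mask applied as alpha.
const chooseCompositeMode = (sendToCanvas: boolean, outputOnlyMaskedRegions: boolean): CompositeMode =>
  !sendToCanvas || !outputOnlyMaskedRegions ? 'blend_over_original' : 'masked_region_only';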
@@ -164,11 +177,6 @@ export const addInpaint = async ({
       fp32,
       image: { image_name: initialImage.image_name },
     });
-    const canvasPasteBack = g.addNode({
-      id: getPrefixedId('canvas_v2_mask_and_crop'),
-      type: 'canvas_v2_mask_and_crop',
-      mask_blur: params.maskBlur,
-    });

     g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
     g.addEdge(i2l, 'latents', denoise, 'latents');
@@ -178,16 +186,35 @@ export const addInpaint = async ({
       g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
     }
     g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
-    g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
-    g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');

+    const expandMask = g.addNode({
+      type: 'expand_mask_with_fade',
+      id: getPrefixedId('expand_mask_with_fade'),
+      fade_size_px: params.maskBlur,
+    });
+    g.addEdge(createGradientMask, 'expanded_mask_area', expandMask, 'mask');

+    // Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
+    // to canvas but not outputting only masked regions
     if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
-      canvasPasteBack.source_image = { image_name: initialImage.image_name };
+      const imageLayerBlend = g.addNode({
+        type: 'invokeai_img_blend',
+        id: getPrefixedId('image_layer_blend'),
+        layer_base: { image_name: initialImage.image_name },
+      });
+      g.addEdge(l2i, 'image', imageLayerBlend, 'layer_upper');
+      g.addEdge(expandMask, 'image', imageLayerBlend, 'mask');
+      return imageLayerBlend;
+    } else {
+      // Otherwise, just apply the mask
+      const applyMaskToImage = g.addNode({
+        type: 'apply_mask_to_image',
+        id: getPrefixedId('apply_mask_to_image'),
+        invert_mask: true,
+      });
+      g.addEdge(expandMask, 'image', applyMaskToImage, 'mask');
+      g.addEdge(l2i, 'image', applyMaskToImage, 'image');
+      return applyMaskToImage;
     }
-
-    return canvasPasteBack;
   }
 };
@@ -11,7 +11,14 @@ import type { Invocation } from 'services/api/types';
 export const addNSFWChecker = (
   g: Graph,
   imageOutput: Invocation<
-    'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode' | 'sd3_l2i'
+    | 'l2i'
+    | 'img_nsfw'
+    | 'img_watermark'
+    | 'img_resize'
+    | 'invokeai_img_blend'
+    | 'apply_mask_to_image'
+    | 'flux_vae_decode'
+    | 'sd3_l2i'
   >
 ): Invocation<'img_nsfw'> => {
   const nsfw = g.addNode({
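The imageOutput union drops canvas_v2_mask_and_crop and gains the two new compositing node types, so the values now returned by addInpaint and addOutpaint still type-check here. A self-contained sketch with stand-in string unions (not the real Invocation type) of why the narrower return type assigns cleanly to the widened one:

type InpaintOutput = 'invokeai_img_blend' | 'apply_mask_to_image';
type CanvasOutput =
  | 'l2i'
  | 'img_nsfw'
  | 'img_watermark'
  | 'img_resize'
  | InpaintOutput
  | 'flux_vae_decode'
  | 'sd3_l2i';

// Widening from the subset to the full union always compiles; the reverse would not.
const toCanvasOutput = (output: InpaintOutput): CanvasOutput => output;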
@@ -40,7 +40,7 @@ export const addOutpaint = async ({
   scaledSize,
   denoising_start,
   fp32,
-}: AddOutpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
+}: AddOutpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
   denoise.denoising_start = denoising_start;

   const params = selectParamsSlice(state);
@@ -142,29 +142,39 @@ export const addOutpaint = async ({
       type: 'img_resize',
       ...originalSize,
     });
-    const canvasPasteBack = g.addNode({
-      id: getPrefixedId('canvas_v2_mask_and_crop'),
-      type: 'canvas_v2_mask_and_crop',
-      mask_blur: params.maskBlur,
+    const expandMask = g.addNode({
+      type: 'expand_mask_with_fade',
+      id: getPrefixedId('expand_mask_with_fade'),
+      fade_size_px: params.maskBlur,
     });

     // Resize initial image and mask to scaled size, feed into to gradient mask

     // After denoising, resize the image and mask back to original size
     g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
-    g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
-
-    // Finally, paste the generated masked image back onto the original image
-    g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
-    g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
-
+    g.addEdge(createGradientMask, 'expanded_mask_area', expandMask, 'mask');
+    g.addEdge(expandMask, 'image', resizeOutputMaskToOriginalSize, 'image');
+    // Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
+    // to canvas but not outputting only masked regions
     if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
-      canvasPasteBack.source_image = { image_name: initialImage.image_name };
+      const imageLayerBlend = g.addNode({
+        type: 'invokeai_img_blend',
+        id: getPrefixedId('image_layer_blend'),
+        layer_base: { image_name: initialImage.image_name },
+      });
+      g.addEdge(resizeOutputImageToOriginalSize, 'image', imageLayerBlend, 'layer_upper');
+      g.addEdge(resizeOutputMaskToOriginalSize, 'image', imageLayerBlend, 'mask');
+      return imageLayerBlend;
+    } else {
+      // Otherwise, just apply the mask
+      const applyMaskToImage = g.addNode({
+        type: 'apply_mask_to_image',
+        id: getPrefixedId('apply_mask_to_image'),
+        invert_mask: true,
+      });
+      g.addEdge(resizeOutputMaskToOriginalSize, 'image', applyMaskToImage, 'mask');
+      g.addEdge(resizeOutputImageToOriginalSize, 'image', applyMaskToImage, 'image');
+      return applyMaskToImage;
     }
-
-    return canvasPasteBack;
   } else {
     infill.image = { image_name: initialImage.image_name };
     // No scale before processing, much simpler
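In the blend branch, invokeai_img_blend composites the resized denoised output (layer_upper) over the original image (layer_base) through the mask produced by expand_mask_with_fade. As an illustrative per-pixel model only (the blend semantics are assumed here, not taken from the node's implementation):

// base and upper are 0-255 channel values; maskAlpha is 0..1 from the faded mask.
const blendChannel = (base: number, upper: number, maskAlpha: number): number =>
  Math.round(base * (1 - maskAlpha) + upper * maskAlpha);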
@@ -197,11 +207,6 @@ export const addOutpaint = async ({
       fp32,
       image: { image_name: initialImage.image_name },
     });
-    const canvasPasteBack = g.addNode({
-      id: getPrefixedId('canvas_v2_mask_and_crop'),
-      type: 'canvas_v2_mask_and_crop',
-      mask_blur: params.maskBlur,
-    });
     g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
     g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
@@ -214,15 +219,35 @@ export const addOutpaint = async ({
     }

     g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
-    g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
-    g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');

+    const expandMask = g.addNode({
+      type: 'expand_mask_with_fade',
+      id: getPrefixedId('expand_mask_with_fade'),
+      fade_size_px: params.maskBlur,
+    });
+    g.addEdge(createGradientMask, 'expanded_mask_area', expandMask, 'mask');

+    // Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
+    // to canvas but not outputting only masked regions
     if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
-      canvasPasteBack.source_image = { image_name: initialImage.image_name };
+      const imageLayerBlend = g.addNode({
+        type: 'invokeai_img_blend',
+        id: getPrefixedId('image_layer_blend'),
+        layer_base: { image_name: initialImage.image_name },
+      });
+      g.addEdge(l2i, 'image', imageLayerBlend, 'layer_upper');
+      g.addEdge(expandMask, 'image', imageLayerBlend, 'mask');
+      return imageLayerBlend;
+    } else {
+      // Otherwise, just apply the mask
+      const applyMaskToImage = g.addNode({
+        type: 'apply_mask_to_image',
+        id: getPrefixedId('apply_mask_to_image'),
+        invert_mask: true,
+      });
+      g.addEdge(expandMask, 'image', applyMaskToImage, 'mask');
+      g.addEdge(l2i, 'image', applyMaskToImage, 'image');
+      return applyMaskToImage;
     }
-
-    return canvasPasteBack;
   }
 };
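In the other branch, apply_mask_to_image with invert_mask: true gates the generated image's alpha by the faded mask, keeping one side of the mask and making the rest transparent so only that region lands on the canvas. A rough illustrative model of that gating (assumed semantics, not the node's code):

// pixelAlpha and maskAlpha are 0..1; invert flips which side of the mask is kept.
const applyMaskAlpha = (pixelAlpha: number, maskAlpha: number, invert: boolean): number => {
  const keep = invert ? 1 - maskAlpha : maskAlpha;
  return pixelAlpha * keep;
};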
@@ -11,7 +11,14 @@ import type { Invocation } from 'services/api/types';
 export const addWatermarker = (
   g: Graph,
   imageOutput: Invocation<
-    'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode' | 'sd3_l2i'
+    | 'l2i'
+    | 'img_nsfw'
+    | 'img_watermark'
+    | 'img_resize'
+    | 'invokeai_img_blend'
+    | 'apply_mask_to_image'
+    | 'flux_vae_decode'
+    | 'sd3_l2i'
   >
 ): Invocation<'img_watermark'> => {
   const watermark = g.addNode({
@@ -170,7 +170,14 @@ export const buildSD1Graph = async (
   const denoising_start = 1 - params.img2imgStrength;

   let canvasOutput: Invocation<
-    'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode' | 'sd3_l2i'
+    | 'l2i'
+    | 'img_nsfw'
+    | 'img_watermark'
+    | 'img_resize'
+    | 'invokeai_img_blend'
+    | 'apply_mask_to_image'
+    | 'flux_vae_decode'
+    | 'sd3_l2i'
   > = l2i;

   if (generationMode === 'txt2img') {
@@ -135,7 +135,14 @@ export const buildSD3Graph = async (
   }

   let canvasOutput: Invocation<
-    'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode' | 'sd3_l2i'
+    | 'l2i'
+    | 'img_nsfw'
+    | 'img_watermark'
+    | 'img_resize'
+    | 'invokeai_img_blend'
+    | 'apply_mask_to_image'
+    | 'flux_vae_decode'
+    | 'sd3_l2i'
   > = l2i;

   if (generationMode === 'txt2img') {
@@ -175,7 +175,14 @@ export const buildSDXLGraph = async (
     : 1 - params.img2imgStrength;

   let canvasOutput: Invocation<
-    'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode' | 'sd3_l2i'
+    | 'l2i'
+    | 'img_nsfw'
+    | 'img_watermark'
+    | 'img_resize'
+    | 'invokeai_img_blend'
+    | 'apply_mask_to_image'
+    | 'flux_vae_decode'
+    | 'sd3_l2i'
   > = l2i;

   if (generationMode === 'txt2img') {
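The same widened union is spelled out in addNSFWChecker, addWatermarker, and the SD1/SD3/SDXL builders above. Written once as a stand-alone alias for reference (CanvasOutputNodeType is a hypothetical name, not one used in this commit):

// Hypothetical shared alias covering every node type a graph's canvas output may be.
type CanvasOutputNodeType =
  | 'l2i'
  | 'img_nsfw'
  | 'img_watermark'
  | 'img_resize'
  | 'invokeai_img_blend'
  | 'apply_mask_to_image'
  | 'flux_vae_decode'
  | 'sd3_l2i';

// e.g. let canvasOutput: Invocation<CanvasOutputNodeType> = l2i;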