diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts index 79dec9a62c..77a603e37d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearBatchConfig.ts @@ -1,14 +1,11 @@ -import { NUMPY_RAND_MAX, NUMPY_RAND_MIN } from 'app/constants'; import type { RootState } from 'app/store/store'; import { generateSeeds } from 'common/util/generateSeeds'; -import randomInt from 'common/util/randomInt'; import { range } from 'es-toolkit/compat'; import type { SeedBehaviour } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; import type { ModelIdentifierField } from 'features/nodes/types/common'; -import type { FieldIdentifier } from 'features/nodes/types/field'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; import type { components } from 'services/api/schema'; -import type { Batch, EnqueueBatchArg } from 'services/api/types'; +import type { Batch, EnqueueBatchArg, Invocation } from 'services/api/types'; import { assert } from 'tsafe'; const getExtendedPrompts = (arg: { @@ -31,13 +28,13 @@ export const prepareLinearUIBatch = (arg: { state: RootState; g: Graph; prepend: boolean; - seedFieldIdentifier?: FieldIdentifier; - positivePromptFieldIdentifier: FieldIdentifier; + positivePromptNode: Invocation<'string'>; + seedNode?: Invocation<'integer'>; origin: string; destination: string; }): EnqueueBatchArg => { - const { state, g, prepend, seedFieldIdentifier, positivePromptFieldIdentifier, origin, destination } = arg; - const { iterations, model, shouldRandomizeSeed, seed, shouldConcatPrompts } = state.params; + const { state, g, prepend, positivePromptNode, seedNode, origin, destination } = arg; + const { iterations, model, shouldRandomizeSeed, seed } = state.params; const { prompts, seedBehaviour } = state.dynamicPrompts; 
assert(model, 'No model found in state when preparing batch'); @@ -47,55 +44,27 @@ export const prepareLinearUIBatch = (arg: { const secondBatchDatumList: components['schemas']['BatchDatum'][] = []; // add seeds first to ensure the output order groups the prompts - if (seedFieldIdentifier && seedBehaviour === 'PER_PROMPT') { + if (seedNode && seedBehaviour === 'PER_PROMPT') { const seeds = generateSeeds({ count: prompts.length * iterations, - // Imagen3's support for seeded generation is iffy, we are just not going too use it in linear UI generations. - start: - model.base === 'imagen3' || model.base === 'imagen4' - ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) - : shouldRandomizeSeed - ? undefined - : seed, + start: shouldRandomizeSeed ? undefined : seed, }); firstBatchDatumList.push({ - node_path: seedFieldIdentifier.nodeId, - field_name: seedFieldIdentifier.fieldName, + node_path: seedNode.id, + field_name: 'value', items: seeds, }); - - // add to metadata - g.removeMetadata(['seed']); - firstBatchDatumList.push({ - node_path: g.getMetadataNode().id, - field_name: 'seed', - items: seeds, - }); - } else if (seedFieldIdentifier && seedBehaviour === 'PER_ITERATION') { + } else if (seedNode && seedBehaviour === 'PER_ITERATION') { // seedBehaviour = SeedBehaviour.PerRun const seeds = generateSeeds({ count: iterations, - // Imagen3's support for seeded generation is iffy, we are just not going too use in in linear UI generations. - start: - model.base === 'imagen3' || model.base === 'imagen4' - ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) - : shouldRandomizeSeed - ? undefined - : seed, + start: shouldRandomizeSeed ? 
undefined : seed, }); secondBatchDatumList.push({ - node_path: seedFieldIdentifier.nodeId, - field_name: seedFieldIdentifier.fieldName, - items: seeds, - }); - - // add to metadata - g.removeMetadata(['seed']); - secondBatchDatumList.push({ - node_path: g.getMetadataNode().id, - field_name: 'seed', + node_path: seedNode.id, + field_name: 'value', items: seeds, }); data.push(secondBatchDatumList); @@ -105,35 +74,11 @@ export const prepareLinearUIBatch = (arg: { // zipped batch of prompts firstBatchDatumList.push({ - node_path: positivePromptFieldIdentifier.nodeId, - field_name: positivePromptFieldIdentifier.fieldName, + node_path: positivePromptNode.id, + field_name: 'value', items: extendedPrompts, }); - // add to metadata - g.removeMetadata(['positive_prompt']); - firstBatchDatumList.push({ - node_path: g.getMetadataNode().id, - field_name: 'positive_prompt', - items: extendedPrompts, - }); - - if (shouldConcatPrompts && model.base === 'sdxl') { - firstBatchDatumList.push({ - node_path: positivePromptFieldIdentifier.nodeId, - field_name: 'style', - items: extendedPrompts, - }); - - // add to metadata - g.removeMetadata(['positive_style_prompt']); - firstBatchDatumList.push({ - node_path: g.getMetadataNode().id, - field_name: 'positive_style_prompt', - items: extendedPrompts, - }); - } - data.push(firstBatchDatumList); const enqueueBatchArg: EnqueueBatchArg = { diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addFLUXFill.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addFLUXFill.ts index 2925630822..282a0568c2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addFLUXFill.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addFLUXFill.ts @@ -7,6 +7,7 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; import { selectCanvasSlice } from 'features/controlLayers/store/selectors'; import type { Dimensions } from 'features/controlLayers/store/types'; 
import type { Graph } from 'features/nodes/util/graph/generation/Graph'; +import { getDenoisingStartAndEnd } from 'features/nodes/util/graph/graphBuilderUtils'; import type { Invocation } from 'services/api/types'; type AddFLUXFillArg = { @@ -28,9 +29,9 @@ export const addFLUXFill = async ({ originalSize, scaledSize, }: AddFLUXFillArg): Promise> => { - // FLUX Fill always fully denoises - denoise.denoising_start = 0; - denoise.denoising_end = 1; + const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state); + denoise.denoising_start = denoising_start; + denoise.denoising_end = denoising_end; const params = selectParamsSlice(state); const canvasSettings = selectCanvasSettingsSlice(state); diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts index c0a3ef3ac4..6ebdfc2cae 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts @@ -1,8 +1,10 @@ import { objectEquals } from '@observ33r/object-equals'; +import type { RootState } from 'app/store/store'; import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager'; import { getPrefixedId } from 'features/controlLayers/konva/util'; import type { CanvasState, Dimensions } from 'features/controlLayers/store/types'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; +import { getDenoisingStartAndEnd } from 'features/nodes/util/graph/graphBuilderUtils'; import type { DenoiseLatentsNodes, LatentToImageNodes, @@ -13,6 +15,7 @@ import type { Invocation } from 'services/api/types'; type AddImageToImageArg = { g: Graph; + state: RootState; manager: CanvasManager; l2i: Invocation; i2l: Invocation<'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l'>; @@ -21,11 +24,11 @@ type AddImageToImageArg = { originalSize: Dimensions; 
scaledSize: Dimensions; bbox: CanvasState['bbox']; - denoising_start: number; }; export const addImageToImage = async ({ g, + state, manager, l2i, i2l, @@ -34,9 +37,11 @@ export const addImageToImage = async ({ originalSize, scaledSize, bbox, - denoising_start, }: AddImageToImageArg): Promise> => { + const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state); denoise.denoising_start = denoising_start; + denoise.denoising_end = denoising_end; + const adapters = manager.compositor.getVisibleAdaptersOfType('raster_layer'); const { image_name } = await manager.compositor.getCompositeImageDTO(adapters, bbox.rect, { is_intermediate: true, diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts index 48adbc150e..d712b12993 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts @@ -7,7 +7,7 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; import { selectCanvasSlice } from 'features/controlLayers/store/selectors'; import type { Dimensions } from 'features/controlLayers/store/types'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils'; +import { getDenoisingStartAndEnd, isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils'; import type { DenoiseLatentsNodes, LatentToImageNodes, @@ -17,8 +17,8 @@ import type { import type { ImageDTO, Invocation } from 'services/api/types'; type AddInpaintArg = { - state: RootState; g: Graph; + state: RootState; manager: CanvasManager; l2i: Invocation; i2l: Invocation<'i2l' | 'flux_vae_encode' | 'sd3_i2l' | 'cogview4_i2l'>; @@ -27,13 +27,12 @@ type AddInpaintArg = { modelLoader: Invocation; originalSize: Dimensions; scaledSize: Dimensions; - 
denoising_start: number; seed: Invocation<'integer'>; }; export const addInpaint = async ({ - state, g, + state, manager, l2i, i2l, @@ -42,10 +41,11 @@ export const addInpaint = async ({ modelLoader, originalSize, scaledSize, - denoising_start, seed, }: AddInpaintArg): Promise> => { + const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state); denoise.denoising_start = denoising_start; + denoise.denoising_end = denoising_end; const params = selectParamsSlice(state); const canvasSettings = selectCanvasSettingsSlice(state); diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts index 71f634848f..15a19f582d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts @@ -7,7 +7,11 @@ import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice'; import { selectCanvasSlice } from 'features/controlLayers/store/selectors'; import type { Dimensions } from 'features/controlLayers/store/types'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { getInfill, isMainModelWithoutUnet } from 'features/nodes/util/graph/graphBuilderUtils'; +import { + getDenoisingStartAndEnd, + getInfill, + isMainModelWithoutUnet, +} from 'features/nodes/util/graph/graphBuilderUtils'; import type { DenoiseLatentsNodes, ImageToLatentsNodes, @@ -28,7 +32,6 @@ type AddOutpaintArg = { modelLoader: Invocation; originalSize: Dimensions; scaledSize: Dimensions; - denoising_start: number; seed: Invocation<'integer'>; }; @@ -43,10 +46,11 @@ export const addOutpaint = async ({ modelLoader, originalSize, scaledSize, - denoising_start, seed, }: AddOutpaintArg): Promise> => { + const { denoising_start, denoising_end } = getDenoisingStartAndEnd(state); denoise.denoising_start = denoising_start; + denoise.denoising_end = 
denoising_end; const params = selectParamsSlice(state); const canvasSettings = selectCanvasSettingsSlice(state); diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts index 227d2066bd..436285fc9d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts @@ -2,11 +2,12 @@ import { objectEquals } from '@observ33r/object-equals'; import { getPrefixedId } from 'features/controlLayers/konva/util'; import type { Dimensions } from 'features/controlLayers/store/types'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; -import type { LatentToImageNodes } from 'features/nodes/util/graph/types'; +import type { DenoiseLatentsNodes, LatentToImageNodes } from 'features/nodes/util/graph/types'; import type { Invocation } from 'services/api/types'; type AddTextToImageArg = { g: Graph; + denoise: Invocation; l2i: Invocation; originalSize: Dimensions; scaledSize: Dimensions; @@ -14,10 +15,14 @@ type AddTextToImageArg = { export const addTextToImage = ({ g, + denoise, l2i, originalSize, scaledSize, }: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode' | 'sd3_l2i' | 'cogview4_l2i'> => { + denoise.denoising_start = 0; + denoise.denoising_end = 1; + if (!objectEquals(scaledSize, originalSize)) { // We need to resize the output image back to the original size const resizeImageToOriginalSize = g.addNode({ diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts index 1d12e4a364..4770a77df1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildChatGPT4oGraph.ts 
@@ -6,11 +6,7 @@ import { isChatGPT4oAspectRatioID, isChatGPT4oReferenceImageConfig } from 'featu import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators'; import { type ImageField, zModelIdentifierField } from 'features/nodes/types/common'; import { Graph } from 'features/nodes/util/graph/generation/Graph'; -import { - selectCanvasOutputFields, - selectOriginalAndScaledSizes, - selectPresetModifiedPrompts, -} from 'features/nodes/util/graph/graphBuilderUtils'; +import { selectCanvasOutputFields, selectOriginalAndScaledSizes } from 'features/nodes/util/graph/graphBuilderUtils'; import type { GraphBuilderArg, GraphBuilderReturn } from 'features/nodes/util/graph/types'; import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types'; import { t } from 'i18next'; @@ -33,10 +29,9 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise entity.isEnabled) @@ -60,24 +55,35 @@ export const buildChatGPT4oGraph = async (arg: GraphBuilderArg): Promise => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building CogView4 graph'); + const model = selectMainModelConfig(state); + assert(model, 'No model selected'); + assert(model.base === 'cogview4', 'Selected model is not a CogView4 model'); + const params = selectParamsSlice(state); const canvas = selectCanvasSlice(state); const { bbox } = canvas; - const { model, cfgScale: cfg_scale, seed: _seed, steps } = params; - - assert(model, 'No model found in state'); + const { cfgScale: cfg_scale, seed: _seed, steps } = params; const { originalSize, scaledSize } = selectOriginalAndScaledSizes(state); - const { positivePrompt, negativePrompt } = selectPresetModifiedPrompts(state); + const prompts = selectPresetModifiedPrompts(state); const g = new Graph(getPrefixedId('cogview4_graph')); - const seed = g.addNode({ - id: getPrefixedId('seed'), - type: 'integer', - value: _seed, - }); + const modelLoader = 
g.addNode({ type: 'cogview4_model_loader', id: getPrefixedId('cogview4_model_loader'), model, }); + + const positivePrompt = g.addNode({ + id: getPrefixedId('positive_prompt'), + type: 'string', + }); const posCond = g.addNode({ type: 'cogview4_text_encoder', id: getPrefixedId('pos_prompt'), - prompt: positivePrompt, }); const negCond = g.addNode({ type: 'cogview4_text_encoder', id: getPrefixedId('neg_prompt'), - prompt: negativePrompt, + prompt: prompts.negative, }); + const seed = g.addNode({ + id: getPrefixedId('seed'), + type: 'integer', + value: _seed, + }); const denoise = g.addNode({ type: 'cogview4_denoise', id: getPrefixedId('denoise_latents'), @@ -69,8 +77,6 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise = l2i; if (generationMode === 'txt2img') { - canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize }); + canvasOutput = addTextToImage({ + g, + denoise, + l2i, + originalSize, + scaledSize, + }); g.upsertMetadata({ generation_mode: 'cogview4_txt2img' }); } else if (generationMode === 'img2img') { assert(manager !== null); canvasOutput = await addImageToImage({ g, + state, manager, + denoise, l2i, i2l, - denoise, vaeSource: modelLoader, originalSize, scaledSize, bbox, - denoising_start, }); g.upsertMetadata({ generation_mode: 'cogview4_img2img' }); } else if (generationMode === 'inpaint') { assert(manager !== null); canvasOutput = await addInpaint({ - state, g, + state, manager, l2i, i2l, @@ -140,15 +153,14 @@ export const buildCogView4Graph = async (arg: GraphBuilderArg): Promise entity.isEnabled) @@ -170,49 +186,13 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise0.9). 
- denoising_start = 1 - img2imgStrength ** 0.2; - } else { - denoising_start = 1 - img2imgStrength; - } - let canvasOutput: Invocation = l2i; if (isFLUXFill && (generationMode === 'inpaint' || generationMode === 'outpaint')) { assert(manager !== null); canvasOutput = await addFLUXFill({ - state, g, + state, manager, l2i, denoise, @@ -220,12 +200,19 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise { const { generationMode, state, manager } = arg; + const model = selectMainModelConfig(state); + assert(model, 'No model selected'); + assert(model.base === 'flux-kontext', 'Selected model is not a FLUX Kontext API model'); + if (generationMode !== 'txt2img') { throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'FLUX Kontext' })); } log.debug({ generationMode, manager: manager?.id }, 'Building FLUX Kontext graph'); - const model = selectMainModelConfig(state); - const canvas = selectCanvasSlice(state); const refImages = selectRefImagesSlice(state); const { bbox } = canvas; - const { positivePrompt } = selectPresetModifiedPrompts(state); - - assert(model, 'No model found in state'); - assert(model.base === 'flux-kontext', 'Model is not a Flux Kontext model'); const validRefImages = refImages.entities .filter((entity) => entity.isEnabled) @@ -54,24 +52,35 @@ export const buildFluxKontextGraph = (arg: GraphBuilderArg): GraphBuilderReturn } const g = new Graph(getPrefixedId('flux_kontext_txt2img_graph')); + const positivePrompt = g.addNode({ + id: getPrefixedId('positive_prompt'), + type: 'string', + }); const fluxKontextImage = g.addNode({ // @ts-expect-error: These nodes are not available in the OSS application type: input_image ? 
'flux_kontext_edit_image' : 'flux_kontext_generate_image', model: zModelIdentifierField.parse(model), - positive_prompt: positivePrompt, aspect_ratio: bbox.aspectRatio.id, input_image, prompt_upsampling: true, ...selectCanvasOutputFields(state), }); + + g.addEdge( + positivePrompt, + 'value', + fluxKontextImage, + // @ts-expect-error: These nodes are not available in the OSS application + 'positive_prompt' + ); + g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); g.upsertMetadata({ - positive_prompt: positivePrompt, model: Graph.getModelMetadataField(model), width: bbox.rect.width, height: bbox.rect.height, }); return { g, - positivePromptFieldIdentifier: { nodeId: fluxKontextImage.id, fieldName: 'positive_prompt' }, + positivePrompt, }; }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts index 13998f9792..a5268f22a7 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen3Graph.ts @@ -15,39 +15,51 @@ const log = logger('system'); export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building Imagen3 graph'); + + const model = selectMainModelConfig(state); + + assert(model, 'No model selected'); + assert(model.base === 'imagen3', 'Selected model is not an Imagen3 API model'); if (generationMode !== 'txt2img') { throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen3' })); } - log.debug({ generationMode, manager: manager?.id }, 'Building Imagen3 graph'); - const canvas = selectCanvasSlice(state); const { bbox } = canvas; - const { positivePrompt, negativePrompt } = selectPresetModifiedPrompts(state); - const model = 
selectMainModelConfig(state); - - assert(model, 'No model found for Imagen3 graph'); - assert(model.base === 'imagen3', 'Imagen3 graph requires Imagen3 model'); + const prompts = selectPresetModifiedPrompts(state); assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen3 does not support this aspect ratio'); - assert(positivePrompt.length > 0, 'Imagen3 requires positive prompt to have at least one character'); + assert(prompts.positive.length > 0, 'Imagen3 requires positive prompt to have at least one character'); const g = new Graph(getPrefixedId('imagen3_txt2img_graph')); + const positivePrompt = g.addNode({ + id: getPrefixedId('positive_prompt'), + type: 'string', + }); const imagen3 = g.addNode({ // @ts-expect-error: These nodes are not available in the OSS application type: 'google_imagen3_generate_image', model: zModelIdentifierField.parse(model), - positive_prompt: positivePrompt, - negative_prompt: negativePrompt, + negative_prompt: prompts.negative, aspect_ratio: bbox.aspectRatio.id, // When enhance_prompt is true, Imagen3 will return a new image every time, ignoring the seed. 
enhance_prompt: true, ...selectCanvasOutputFields(state), }); + + g.addEdge( + positivePrompt, + 'value', + imagen3, + // @ts-expect-error: These nodes are not available in the OSS application + 'positive_prompt' + ); + g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); + g.upsertMetadata({ - positive_prompt: positivePrompt, - negative_prompt: negativePrompt, + negative_prompt: prompts.negative, width: bbox.rect.width, height: bbox.rect.height, model: Graph.getModelMetadataField(model), @@ -56,7 +68,6 @@ export const buildImagen3Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { return { g, - seedFieldIdentifier: { nodeId: imagen3.id, fieldName: 'seed' }, - positivePromptFieldIdentifier: { nodeId: imagen3.id, fieldName: 'positive_prompt' }, + positivePrompt, }; }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts index 4b7b0a6d77..d2cf67934d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildImagen4Graph.ts @@ -15,39 +15,51 @@ const log = logger('system'); export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building Imagen4 graph'); + + const model = selectMainModelConfig(state); + assert(model, 'No model selected'); + assert(model.base === 'imagen4', 'Selected model is not a Imagen4 API model'); if (generationMode !== 'txt2img') { throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen4' })); } - log.debug({ generationMode, manager: manager?.id }, 'Building Imagen4 graph'); - const canvas = selectCanvasSlice(state); const { bbox } = canvas; - const { positivePrompt, negativePrompt } = selectPresetModifiedPrompts(state); - 
const model = selectMainModelConfig(state); - assert(model, 'No model found for Imagen4 graph'); - assert(model.base === 'imagen4', 'Imagen4 graph requires Imagen4 model'); + const prompts = selectPresetModifiedPrompts(state); assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen4 does not support this aspect ratio'); - assert(positivePrompt.length > 0, 'Imagen4 requires positive prompt to have at least one character'); + assert(prompts.positive.length > 0, 'Imagen4 requires positive prompt to have at least one character'); const g = new Graph(getPrefixedId('imagen4_txt2img_graph')); + const positivePrompt = g.addNode({ + id: getPrefixedId('positive_prompt'), + type: 'string', + }); const imagen4 = g.addNode({ // @ts-expect-error: These nodes are not available in the OSS application type: 'google_imagen4_generate_image', model: zModelIdentifierField.parse(model), - positive_prompt: positivePrompt, - negative_prompt: negativePrompt, + negative_prompt: prompts.negative, aspect_ratio: bbox.aspectRatio.id, // When enhance_prompt is true, Imagen4 will return a new image every time, ignoring the seed. 
enhance_prompt: true, ...selectCanvasOutputFields(state), }); + + g.addEdge( + positivePrompt, + 'value', + imagen4, + // @ts-expect-error: These nodes are not available in the OSS application + 'positive_prompt' + ); + g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); + g.upsertMetadata({ - positive_prompt: positivePrompt, - negative_prompt: negativePrompt, + negative_prompt: prompts.negative, width: bbox.rect.width, height: bbox.rect.height, model: Graph.getModelMetadataField(model), @@ -56,7 +68,6 @@ export const buildImagen4Graph = (arg: GraphBuilderArg): GraphBuilderReturn => { return { g, - seedFieldIdentifier: { nodeId: imagen4.id, fieldName: 'seed' }, - positivePromptFieldIdentifier: { nodeId: imagen4.id, fieldName: 'positive_prompt' }, + positivePrompt, }; }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts index 36b15ae456..f929b54bdd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildSD1Graph.ts @@ -31,14 +31,18 @@ const log = logger('system'); export const buildSD1Graph = async (arg: GraphBuilderArg): Promise => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building SD1/SD2 graph'); + const model = selectMainModelConfig(state); + assert(model, 'No model selected'); + assert(model.base === 'sd-1' || model.base === 'sd-2', 'Selected model is not a SD1/SD2 model'); + const params = selectParamsSlice(state); const canvas = selectCanvasSlice(state); const refImages = selectRefImagesSlice(state); const { bbox } = canvas; - const model = selectMainModelConfig(state); const { cfgScale: cfg_scale, @@ -52,10 +56,8 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise = seamless ?? vaeLoader ?? 
modelLoader; g.addEdge(vaeSource, 'vae', l2i, 'vae'); - const denoising_start = 1 - params.img2imgStrength; - let canvasOutput: Invocation = l2i; if (generationMode === 'txt2img') { - canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize }); + canvasOutput = addTextToImage({ + g, + denoise, + l2i, + originalSize, + scaledSize, + }); g.upsertMetadata({ generation_mode: 'txt2img' }); } else if (generationMode === 'img2img') { assert(manager !== null); canvasOutput = await addImageToImage({ g, + state, manager, l2i, i2l, @@ -186,14 +199,13 @@ export const buildSD1Graph = async (arg: GraphBuilderArg): Promise => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building SD3 graph'); const model = selectMainModelConfig(state); @@ -34,27 +35,13 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise0.9). - denoising_start = 1 - img2imgStrength ** 0.2; - } else { - denoising_start = 1 - img2imgStrength; - } + g.addEdgeToMetadata(seed, 'value', 'seed'); + g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt'); let canvasOutput: Invocation = l2i; if (generationMode === 'txt2img') { - canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize }); + canvasOutput = addTextToImage({ + g, + denoise, + l2i, + originalSize, + scaledSize, + }); g.upsertMetadata({ generation_mode: 'sd3_txt2img' }); } else if (generationMode === 'img2img') { assert(manager !== null); canvasOutput = await addImageToImage({ g, + state, manager, l2i, i2l, @@ -148,14 +143,13 @@ export const buildSD3Graph = async (arg: GraphBuilderArg): Promise => { const { generationMode, state, manager } = arg; + log.debug({ generationMode, manager: manager?.id }, 'Building SDXL graph'); const model = selectMainModelConfig(state); - assert(model, 'No model found in state'); - assert(model.base === 'sdxl'); + assert(model, 'No model selected'); + assert(model.base === 'sdxl', 'Selected model is not a SDXL model'); const 
params = selectParamsSlice(state); const canvas = selectCanvasSlice(state); @@ -53,47 +54,49 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise = l2i; if (generationMode === 'txt2img') { - canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize }); + canvasOutput = addTextToImage({ + g, + denoise, + l2i, + originalSize, + scaledSize, + }); g.upsertMetadata({ generation_mode: 'sdxl_txt2img' }); } else if (generationMode === 'img2img') { assert(manager !== null); canvasOutput = await addImageToImage({ g, + state, manager, l2i, i2l, @@ -193,14 +209,13 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise nodeId.split(':')[0] === CANVAS_OUTPUT_PREFIX; + +export const getDenoisingStartAndEnd = (state: RootState): { denoising_start: number; denoising_end: number } => { + const optimizedDenoisingEnabled = selectOptimizedDenoisingEnabled(state); + const denoisingStrength = selectImg2imgStrength(state); + const model = selectMainModelConfig(state); + const refinerModel = selectRefinerModel(state); + const refinerDenoisingStart = selectRefinerStart(state); + + switch (model?.base) { + case 'sd-3': { + // We rescale the img2imgStrength (with exponent 0.2) to effectively use the entire range [0, 1] and make the scale + // more user-friendly for SD3.5. Without this, most of the 'change' is concentrated in the high denoise strength + // range (>0.9). + const exponent = optimizedDenoisingEnabled ? 0.2 : 1; + return { + denoising_start: 1 - denoisingStrength ** exponent, + denoising_end: 1, + }; + } + case 'flux': { + if (model.variant === 'inpaint') { + // This is a FLUX Fill model - we always denoise fully + return { + denoising_start: 0, + denoising_end: 1, + }; + } else { + // We rescale the img2imgStrength (with exponent 0.2) to effectively use the entire range [0, 1] and make the scale + // more user-friendly for SD3.5. Without this, most of the 'change' is concentrated in the high denoise strength + // range (>0.9). 
+ const exponent = optimizedDenoisingEnabled ? 0.2 : 1; + return { + denoising_start: 1 - denoisingStrength ** exponent, + denoising_end: 1, + }; + } + } + case 'sd-1': + case 'sd-2': + case 'cogview4': { + return { + denoising_start: 1 - denoisingStrength, + denoising_end: 1, + }; + } + case 'sdxl': { + if (refinerModel) { + return { + denoising_start: Math.min(refinerDenoisingStart, 1 - denoisingStrength), + denoising_end: refinerDenoisingStart, + }; + } else { + return { + denoising_start: 1 - denoisingStrength, + denoising_end: 1, + }; + } + } + default: { + assert(false, `Unsupported base: ${model?.base}`); + } + } +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/types.ts b/invokeai/frontend/web/src/features/nodes/util/graph/types.ts index 0a3c97483f..3788683042 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/types.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/types.ts @@ -1,8 +1,8 @@ import type { RootState } from 'app/store/store'; import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager'; import type { GenerationMode } from 'features/controlLayers/store/types'; -import type { FieldIdentifier } from 'features/nodes/types/field'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; +import type { Invocation } from 'services/api/types'; export type ImageOutputNodes = | 'l2i' @@ -38,8 +38,8 @@ export type GraphBuilderArg = { export type GraphBuilderReturn = { g: Graph; - seedFieldIdentifier?: FieldIdentifier; - positivePromptFieldIdentifier: FieldIdentifier; + seed?: Invocation<'integer'>; + positivePrompt: Invocation<'string'>; }; export class UnsupportedGenerationModeError extends Error { diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts index dd8747b927..b69552847d 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts +++ 
b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts @@ -97,15 +97,15 @@ const enqueueCanvas = async (store: AppStore, canvasManager: CanvasManager, prep return; } - const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = buildGraphResult.value; + const { g, seed, positivePrompt } = buildGraphResult.value; const prepareBatchResult = withResult(() => prepareLinearUIBatch({ state, g, prepend, - seedFieldIdentifier, - positivePromptFieldIdentifier, + seedNode: seed, + positivePromptNode: positivePrompt, origin: 'canvas', destination, }) diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts index 59651fe91c..a6a1d2ecfe 100644 --- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts +++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts @@ -95,15 +95,15 @@ const enqueueGenerate = async (store: AppStore, prepend: boolean) => { return; } - const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = buildGraphResult.value; + const { g, seed, positivePrompt } = buildGraphResult.value; const prepareBatchResult = withResult(() => prepareLinearUIBatch({ state, g, prepend, - seedFieldIdentifier, - positivePromptFieldIdentifier, + seedNode: seed, + positivePromptNode: positivePrompt, origin: 'canvas', destination, })