tidy(ui): remove unused stuff

psychedelicious
2024-08-23 11:27:03 +10:00
parent 639304197b
commit c2be63a811
32 changed files with 24 additions and 1984 deletions


@@ -1,166 +0,0 @@
import type { RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import {
DENOISE_LATENTS_HRF,
ESRGAN_HRF,
IMAGE_TO_LATENTS_HRF,
LATENTS_TO_IMAGE_HRF_HR,
LATENTS_TO_IMAGE_HRF_LR,
NOISE_HRF,
RESIZE_HRF,
} from 'features/nodes/util/graph/constants';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
/**
* Calculates the new resolution for high-resolution features (HRF) based on the base model type.
* Adjusts the width and height to maintain the aspect ratio and constrains them by the model's dimension limits,
* rounding down to the nearest multiple of 8.
*
* @param {number} optimalDimension The optimal dimension for the base model.
* @param {number} width The current width to be adjusted for HRF.
* @param {number} height The current height to be adjusted for HRF.
* @return {{newWidth: number, newHeight: number}} The new width and height, adjusted and rounded as needed.
*/
function calculateHrfRes(
optimalDimension: number,
width: number,
height: number
): { newWidth: number; newHeight: number } {
const aspect = width / height;
const minDimension = Math.floor(optimalDimension * 0.5);
const modelArea = optimalDimension * optimalDimension; // Assuming square images for model_area
let initWidth;
let initHeight;
if (aspect > 1.0) {
initHeight = Math.max(minDimension, Math.sqrt(modelArea / aspect));
initWidth = initHeight * aspect;
} else {
initWidth = Math.max(minDimension, Math.sqrt(modelArea * aspect));
initHeight = initWidth / aspect;
}
// Cap initial height and width to final height and width.
initWidth = Math.min(width, initWidth);
initHeight = Math.min(height, initHeight);
const newWidth = roundToMultiple(Math.floor(initWidth), 8);
const newHeight = roundToMultiple(Math.floor(initHeight), 8);
return { newWidth, newHeight };
}
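// Worked example (illustrative only, not part of the original file): with an SD1-family
// model where optimalDimension = 512 and a 1024x1024 target, aspect = 1, so the else
// branch gives initWidth = max(256, sqrt(512 * 512)) = 512 and initHeight = 512.
// Neither exceeds the 1024x1024 cap, so the function returns
// { newWidth: 512, newHeight: 512 }: the HRF graph denoises at 512x512 first and
// only afterwards upscales back to the requested 1024x1024.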
/**
* Adds HRF to the graph.
* @param state The root redux state
* @param g The graph to add HRF to
* @param denoise The denoise node
* @param noise The noise node
* @param l2i The l2i node
* @param vaeSource The VAE source node (may be a model loader, VAE loader, or seamless node)
* @returns The HRF image output node.
*/
export const addHRF = (
state: RootState,
g: Graph,
denoise: Invocation<'denoise_latents'>,
noise: Invocation<'noise'>,
l2i: Invocation<'l2i'>,
vaeSource: Invocation<'vae_loader'> | Invocation<'main_model_loader'> | Invocation<'seamless'>
): Invocation<'l2i'> => {
const { hrfStrength, hrfEnabled, hrfMethod } = state.hrf;
const { width, height } = state.canvasV2.document;
const optimalDimension = selectOptimalDimension(state);
const { newWidth: hrfWidth, newHeight: hrfHeight } = calculateHrfRes(optimalDimension, width, height);
// Change height and width of original noise node to initial resolution.
if (noise) {
noise.width = hrfWidth;
noise.height = hrfHeight;
}
// Define new nodes and their connections, roughly in order of operations.
const l2iHrfLR = g.addNode({ type: 'l2i', id: LATENTS_TO_IMAGE_HRF_LR, fp32: l2i.fp32 });
g.addEdge(denoise, 'latents', l2iHrfLR, 'latents');
g.addEdge(vaeSource, 'vae', l2iHrfLR, 'vae');
const resizeHrf = g.addNode({
id: RESIZE_HRF,
type: 'img_resize',
width: width,
height: height,
});
if (hrfMethod === 'ESRGAN') {
let model_name: Invocation<'esrgan'>['model_name'] = 'RealESRGAN_x2plus.pth';
if ((width * height) / (hrfWidth * hrfHeight) > 2) {
model_name = 'RealESRGAN_x4plus.pth';
}
const esrganHrf = g.addNode({ id: ESRGAN_HRF, type: 'esrgan', model_name });
g.addEdge(l2iHrfLR, 'image', esrganHrf, 'image');
g.addEdge(esrganHrf, 'image', resizeHrf, 'image');
} else {
g.addEdge(l2iHrfLR, 'image', resizeHrf, 'image');
}
const noiseHrf = g.addNode({
type: 'noise',
id: NOISE_HRF,
seed: noise.seed,
use_cpu: noise.use_cpu,
});
g.addEdge(resizeHrf, 'height', noiseHrf, 'height');
g.addEdge(resizeHrf, 'width', noiseHrf, 'width');
const i2lHrf = g.addNode({ type: 'i2l', id: IMAGE_TO_LATENTS_HRF });
g.addEdge(vaeSource, 'vae', i2lHrf, 'vae');
g.addEdge(resizeHrf, 'image', i2lHrf, 'image');
const denoiseHrf = g.addNode({
type: 'denoise_latents',
id: DENOISE_LATENTS_HRF,
cfg_scale: denoise.cfg_scale,
scheduler: denoise.scheduler,
steps: denoise.steps,
denoising_start: 1 - hrfStrength,
denoising_end: 1,
});
g.addEdge(i2lHrf, 'latents', denoiseHrf, 'latents');
g.addEdge(noiseHrf, 'noise', denoiseHrf, 'noise');
// Copy the edges feeding the original denoise node over to the new denoise node
g.getEdgesTo(denoise, ['control', 'ip_adapter', 'unet', 'positive_conditioning', 'negative_conditioning']).forEach(
(edge) => {
const clone = deepClone(edge);
clone.destination.node_id = denoiseHrf.id;
g.addEdgeFromObj(clone);
}
);
// The original l2i node is no longer needed, so remove it
g.deleteNode(l2i.id);
const l2iHrfHR = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE_HRF_HR,
fp32: l2i.fp32,
is_intermediate: false,
board: getBoardField(state),
});
g.addEdge(vaeSource, 'vae', l2iHrfHR, 'vae');
g.addEdge(denoiseHrf, 'latents', l2iHrfHR, 'latents');
g.upsertMetadata({
hrf_strength: hrfStrength,
hrf_enabled: hrfEnabled,
hrf_method: hrfMethod,
});
g.setMetadataReceivingNode(l2iHrfHR);
return l2iHrfHR;
};
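// Usage sketch (illustrative only; it mirrors the commented-out HRF wiring in the
// SD1/SD2 text-to-image builder later in this commit). `imageOutput`, `seamless`,
// `vaeLoader`, and `modelLoader` are assumed to exist in the calling builder:
//
//   const vaeSource = seamless ?? vaeLoader ?? modelLoader;
//   if (state.hrf.hrfEnabled) {
//     imageOutput = addHRF(state, g, denoise, noise, l2i, vaeSource);
//   }
//
// The returned node is the new high-res l2i and replaces the original l2i as the
// graph's terminal image output.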


@@ -1,5 +0,0 @@
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
export const isValidLayer = (layer: CanvasRasterLayerState) => {
return layer.isEnabled && layer.objects.length > 0;
};


@@ -1,197 +0,0 @@
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import {
LATENTS_TO_IMAGE,
NEGATIVE_CONDITIONING,
NEGATIVE_CONDITIONING_COLLECT,
NOISE,
POSITIVE_CONDITIONING,
POSITIVE_CONDITIONING_COLLECT,
SDXL_CONTROL_LAYERS_GRAPH,
SDXL_DENOISE_LATENTS,
SDXL_MODEL_LOADER,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addSDXLLoRAs } from 'features/nodes/util/graph/generation/addSDXLLoRAs';
import { addSDXLRefiner } from 'features/nodes/util/graph/generation/addSDXLRefiner';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation, NonNullableGraph } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
export const buildImageToImageSDXLGraph = async (
state: RootState,
manager: CanvasManager
): Promise<NonNullableGraph> => {
const { bbox, params } = state.canvasV2;
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
seed,
steps,
shouldUseCpuNoise,
vaePrecision,
vae,
refinerModel,
refinerStart,
img2imgStrength,
} = params;
assert(model, 'No model found in state');
const { positivePrompt, negativePrompt, positiveStylePrompt, negativeStylePrompt } = getPresetModifiedPrompts(state);
const { originalSize, scaledSize } = getSizes(bbox);
const g = new Graph(SDXL_CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({
type: 'sdxl_model_loader',
id: SDXL_MODEL_LOADER,
model,
});
const posCond = g.addNode({
type: 'sdxl_compel_prompt',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
style: positiveStylePrompt,
});
const posCondCollect = g.addNode({
type: 'collect',
id: POSITIVE_CONDITIONING_COLLECT,
});
const negCond = g.addNode({
type: 'sdxl_compel_prompt',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
style: negativeStylePrompt,
});
const negCondCollect = g.addNode({
type: 'collect',
id: NEGATIVE_CONDITIONING_COLLECT,
});
const noise = g.addNode({
type: 'noise',
id: NOISE,
seed,
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
type: 'denoise_latents',
id: SDXL_DENOISE_LATENTS,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: refinerModel ? Math.min(refinerStart, 1 - img2imgStrength) : 1 - img2imgStrength,
denoising_end: refinerModel ? refinerStart : 1,
});
const l2i = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32: vaePrecision === 'fp32',
board: getBoardField(state),
// This is the terminal node and must always save to gallery.
is_intermediate: false,
use_cache: false,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
type: 'vae_loader',
id: VAE_LOADER,
vae_model: vae,
})
: null;
let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 'clip', negCond, 'clip');
g.addEdge(modelLoader, 'clip2', posCond, 'clip2');
g.addEdge(modelLoader, 'clip2', negCond, 'clip2');
g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
g.addEdge(negCond, 'conditioning', negCondCollect, 'item');
g.addEdge(posCondCollect, 'collection', denoise, 'positive_conditioning');
g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning');
g.addEdge(noise, 'noise', denoise, 'noise');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'sdxl');
g.upsertMetadata({
generation_mode: 'sdxl_txt2img',
cfg_scale,
cfg_rescale_multiplier,
width: scaledSize.width,
height: scaledSize.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: Graph.getModelMetadataField(modelConfig),
seed,
steps,
rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
scheduler,
positive_style_prompt: positiveStylePrompt,
negative_style_prompt: negativeStylePrompt,
vae: vae ?? undefined,
});
const seamless = addSeamless(state, g, denoise, modelLoader, vaeLoader);
addSDXLLoRAs(state, g, denoise, modelLoader, seamless, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
// Add Refiner if enabled
if (refinerModel) {
await addSDXLRefiner(state, g, denoise, seamless, posCond, negCond, l2i);
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
const _addedIPAs = addIPAdapters(state.canvasV2.ipAdapters.entities, g, denoise, modelConfig.base);
const _addedRegions = await addRegions(
manager,
state.canvasV2.regions.entities,
g,
state.canvasV2.document,
state.canvasV2.bbox,
modelConfig.base,
denoise,
posCond,
negCond,
posCondCollect,
negCondCollect
);
if (state.system.shouldUseNSFWChecker) {
imageOutput = addNSFWChecker(g, imageOutput);
}
if (state.system.shouldUseWatermarker) {
imageOutput = addWatermarker(g, imageOutput);
}
g.setMetadataReceivingNode(imageOutput);
return g.getGraph();
};


@@ -1,205 +0,0 @@
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
import {
CLIP_SKIP,
CONTROL_LAYERS_GRAPH,
DENOISE_LATENTS,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
NEGATIVE_CONDITIONING,
NEGATIVE_CONDITIONING_COLLECT,
NOISE,
POSITIVE_CONDITIONING,
POSITIVE_CONDITIONING_COLLECT,
VAE_LOADER,
} from 'features/nodes/util/graph/constants';
import { addControlAdapters } from 'features/nodes/util/graph/generation/addControlAdapters';
// import { addHRF } from 'features/nodes/util/graph/generation/addHRF';
import { addIPAdapters } from 'features/nodes/util/graph/generation/addIPAdapters';
import { addLoRAs } from 'features/nodes/util/graph/generation/addLoRAs';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import type { GraphType } from 'features/nodes/util/graph/generation/Graph';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
export const buildTextToImageSD1SD2Graph = async (state: RootState, manager: CanvasManager): Promise<GraphType> => {
const { bbox, params } = state.canvasV2;
const {
model,
cfgScale: cfg_scale,
cfgRescaleMultiplier: cfg_rescale_multiplier,
scheduler,
steps,
clipSkip: skipped_layers,
shouldUseCpuNoise,
vaePrecision,
seed,
vae,
} = params;
assert(model, 'No model found in state');
const { positivePrompt, negativePrompt } = getPresetModifiedPrompts(state);
const { originalSize, scaledSize } = getSizes(bbox);
const g = new Graph(CONTROL_LAYERS_GRAPH);
const modelLoader = g.addNode({
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
model,
});
const clipSkip = g.addNode({
type: 'clip_skip',
id: CLIP_SKIP,
skipped_layers,
});
const posCond = g.addNode({
type: 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
});
const posCondCollect = g.addNode({
type: 'collect',
id: POSITIVE_CONDITIONING_COLLECT,
});
const negCond = g.addNode({
type: 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
});
const negCondCollect = g.addNode({
type: 'collect',
id: NEGATIVE_CONDITIONING_COLLECT,
});
const noise = g.addNode({
type: 'noise',
id: NOISE,
seed,
width: scaledSize.width,
height: scaledSize.height,
use_cpu: shouldUseCpuNoise,
});
const denoise = g.addNode({
type: 'denoise_latents',
id: DENOISE_LATENTS,
cfg_scale,
cfg_rescale_multiplier,
scheduler,
steps,
denoising_start: 0,
denoising_end: 1,
});
const l2i = g.addNode({
type: 'l2i',
id: LATENTS_TO_IMAGE,
fp32: vaePrecision === 'fp32',
board: getBoardField(state),
// This is the terminal node and must always save to gallery.
is_intermediate: false,
use_cache: false,
});
const vaeLoader =
vae?.base === model.base
? g.addNode({
type: 'vae_loader',
id: VAE_LOADER,
vae_model: vae,
})
: null;
let imageOutput: Invocation<'l2i'> | Invocation<'img_nsfw'> | Invocation<'img_watermark'> | Invocation<'img_resize'> =
l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', clipSkip, 'clip');
g.addEdge(clipSkip, 'clip', posCond, 'clip');
g.addEdge(clipSkip, 'clip', negCond, 'clip');
g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
g.addEdge(negCond, 'conditioning', negCondCollect, 'item');
g.addEdge(posCondCollect, 'collection', denoise, 'positive_conditioning');
g.addEdge(negCondCollect, 'collection', denoise, 'negative_conditioning');
g.addEdge(noise, 'noise', denoise, 'noise');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'sd-1' || modelConfig.base === 'sd-2');
g.upsertMetadata({
generation_mode: 'txt2img',
cfg_scale,
cfg_rescale_multiplier,
width: scaledSize.width,
height: scaledSize.height,
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
model: Graph.getModelMetadataField(modelConfig),
seed,
steps,
rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
scheduler,
clip_skip: skipped_layers,
vae: vae ?? undefined,
});
const seamless = addSeamless(state, g, denoise, modelLoader, vaeLoader);
addLoRAs(state, g, denoise, modelLoader, seamless, clipSkip, posCond, negCond);
// We might get the VAE from the main model, custom VAE, or seamless node.
const vaeSource = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
if (!isEqual(scaledSize, originalSize)) {
// We are using scaled bbox and need to resize the output image back to the original size.
imageOutput = g.addNode({
id: 'img_resize',
type: 'img_resize',
...originalSize,
is_intermediate: false,
use_cache: false,
});
g.addEdge(l2i, 'image', imageOutput, 'image');
}
const _addedCAs = addControlAdapters(state.canvasV2.controlAdapters.entities, g, denoise, modelConfig.base);
const _addedIPAs = addIPAdapters(state.canvasV2.ipAdapters.entities, g, denoise, modelConfig.base);
const _addedRegions = await addRegions(
manager,
state.canvasV2.regions.entities,
g,
state.canvasV2.document,
state.canvasV2.bbox,
modelConfig.base,
denoise,
posCond,
negCond,
posCondCollect,
negCondCollect
);
// const isHRFAllowed = !addedLayers.some((l) => isInitialImageLayer(l) || isRegionalGuidanceLayer(l));
// if (isHRFAllowed && state.hrf.hrfEnabled) {
// imageOutput = addHRF(state, g, denoise, noise, l2i, vaeSource);
// }
if (state.system.shouldUseNSFWChecker) {
imageOutput = addNSFWChecker(g, imageOutput);
}
if (state.system.shouldUseWatermarker) {
imageOutput = addWatermarker(g, imageOutput);
}
g.setMetadataReceivingNode(imageOutput);
return g.getGraph();
};